swmm-pandas 0.6.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- swmm/pandas/__init__.py +7 -0
- swmm/pandas/constants.py +37 -0
- swmm/pandas/input/README.md +61 -0
- swmm/pandas/input/__init__.py +2 -0
- swmm/pandas/input/_section_classes.py +2309 -0
- swmm/pandas/input/input.py +888 -0
- swmm/pandas/input/model.py +403 -0
- swmm/pandas/output/__init__.py +2 -0
- swmm/pandas/output/output.py +2580 -0
- swmm/pandas/output/structure.py +317 -0
- swmm/pandas/output/tools.py +32 -0
- swmm/pandas/py.typed +0 -0
- swmm/pandas/report/__init__.py +1 -0
- swmm/pandas/report/report.py +773 -0
- swmm_pandas-0.6.0.dist-info/METADATA +71 -0
- swmm_pandas-0.6.0.dist-info/RECORD +19 -0
- swmm_pandas-0.6.0.dist-info/WHEEL +4 -0
- swmm_pandas-0.6.0.dist-info/entry_points.txt +4 -0
- swmm_pandas-0.6.0.dist-info/licenses/LICENSE.md +157 -0
@@ -0,0 +1,2580 @@
from __future__ import annotations

from aenum import EnumMeta
import os.path
import warnings
from datetime import datetime, timedelta
from functools import wraps
from typing import Callable
from collections.abc import Sequence
from itertools import product
from io import SEEK_END
import struct

from aenum import Enum, EnumMeta, extend_enum
from numpy import asarray, atleast_1d, atleast_2d, concatenate, datetime64
from numpy import integer as npint
from numpy import ndarray, stack, tile, vstack
import numpy.core.records
from pandas.core.api import (
    DataFrame,
    DatetimeIndex,
    Index,
    MultiIndex,
    Timestamp,
    to_datetime,
    IndexSlice,
)
from swmm.toolkit import output, shared_enum

from swmm.pandas.output.structure import Structure
from swmm.pandas.output.tools import arrayish, _enum_get, _enum_keys


def output_open_handler(func):
    """Checks if output file is open before running function.

    Parameters
    ----------
    func: function
        method of Output class
    """

    @wraps(func)
    def inner_function(self, *args, **kwargs):
        if not self._loaded:
            self._open()

        return func(self, *args, **kwargs)

    return inner_function

class Output:
    def __init__(self, binfile: str, preload: bool = False):
        """Base class for a SWMM binary output file.

        The output object provides several options to process timeseries within the binary output file.

        Output files should be closed after use to prevent memory leaks. Close them explicitly with
        the `_close()` method, by deleting the object using `del`, or use it with a context manager.

        .. code-block:: python

            # Using the _close method
            >>> from swmm.pandas import Output
            >>> out = Output('tests/Model.out')
            >>> print(out.project_size)
            [3, 9, 8, 1, 3]
            >>> out._close() # can also use `del out`
            >>>
            # Using a context manager
            >>> with Output('tests/Model.out') as out:
            ...     print(out.pollutants)
            ('groundwater', 'pol_rainfall', 'sewage')

        Parameters
        ----------
        binfile: str
            model binary file path
        preload: bool
            if True, load the full results table into memory when the file is opened, defaults to False
        """

        self._period: int
        """number of reporting time steps in the binary output file"""

        self._report: int
        """out file reporting time step in seconds"""

        self._start: datetime
        """start datetime of the output file records"""

        self._end: datetime
        """end datetime of the output file records"""

        self._timeIndex: DatetimeIndex
        """DatetimeIndex to use for output timeseries"""

        self._project_size: list[int]
        """Array of element count values [nSubcatchments, nNodes, nLinks, nSystems(1), nPollutants]"""

        self._subcatchments: tuple[str, ...]
        """Tuple of subcatchment names in output file"""

        self._links: tuple[str, ...]
        """Tuple of link names in output file"""

        self._pollutants: tuple[str, ...]
        """Tuple of pollutant names in output file"""

        self._handle = None

        self._binfile: str = binfile
        """path to binary output file"""

        self._delete_handle: bool = False
        """Indicates if output file was closed correctly"""

        self._preload: bool = preload

        self._loaded: bool = False
        """Indicates if output file was loaded correctly"""

        self.subcatch_attributes = Enum(
            "subcatch_attributes",
            list(shared_enum.SubcatchAttribute.__members__.keys())[:-1],
            start=0,
        )
        """Subcatchment attribute enumeration: By default has

        'rainfall',
        'snow_depth',
        'evap_loss',
        'infil_loss',
        'runoff_rate',
        'gw_outflow_rate',
        'gw_table_elev',
        'soil_moisture'
        """

        # need copies of enums to extend them for pollutants
        # basically recreate enums using the keys from shared_enum
        # but drop POLLUT_CONC_0 for each
        #
        # I looked into using swmm.toolkit.output_metadata for this but it
        # extends global enums, which could break having multiple
        # output objects opened in the same python session if they
        # have different pollutant names

        self.node_attributes: shared_enum.NodeAttribute = Enum(
            "node_attributes",
            list(shared_enum.NodeAttribute.__members__.keys())[:-1],
            start=0,
        )
        """Node attribute enumeration: By default has

        'invert_depth',
        'hydraulic_head',
        'ponded_volume',
        'lateral_inflow',
        'total_inflow',
        'flooding_losses'
        """
        self.link_attributes: shared_enum.LinkAttribute = Enum(
            "link_attributes",
            list(shared_enum.LinkAttribute.__members__.keys())[:-1],
            start=0,
        )
        """Link attribute enumeration: By default has

        'flow_rate',
        'flow_depth',
        'flow_velocity',
        'flow_volume',
        'capacity',

        """

        self.system_attributes = shared_enum.SystemAttribute
        """System attribute enumeration: By default has

        'air_temp',
        'rainfall',
        'snow_depth',
        'evap_infil_loss',
        'runoff_flow',
        'dry_weather_inflow',
        'gw_inflow',
        'rdii_inflow',
        'direct_inflow',
        'total_lateral_inflow',
        'flood_losses',
        'outfall_flows',
        'volume_stored',
        'evap_rate',
        'ptnl_evap_rate'

        """

    @staticmethod
    def _elementIndex(
        elementID: str | int | None, indexSquence: Sequence[str], elementType: str
    ) -> int:
        """Validate the index of a model element passed to Output methods. Used to
        convert model element names to their index in the out file.

        Parameters
        ----------
        elementID: str, int
            The name or index of the model element listed in the index_dict dict.
        indexSquence: one or more strings
            The ordered sequence against which to validate the index
            (one of self.nodes, self.links, self.subcatchments).
        elementType: str
            The type of model element (e.g. node, link, etc.)
            Only used to print the exception if an attribute cannot be found.

        Returns
        -------
        int
            The integer index of the requested element.

        Raises
        ------
        OutputException
            Exception if element cannot be found in indexSequence.

        """

        if isinstance(elementID, (int, npint)):
            return int(elementID)

        try:
            return indexSquence.index(elementID)

        # since this class can pull multiple attributes and elements in one function
        # call it is probably better to do some pre-validation of input arguments
        # before starting a potentially lengthy data pull
        except ValueError:
            raise ValueError(
                f"{elementType} ID: {elementID} does not exist in model output."
            )

    @staticmethod
    def _validateAttribute(
        attribute: int | str | Sequence[int | str] | None,
        validAttributes: EnumMeta,
    ) -> tuple[list, list]:
        """
        Function to validate attribute arguments of element_series, element_attribute,
        and element_result functions.

        Parameters
        ----------
        attribute: Union[int, str, Sequence[Union[int, str]], None]
            The attribute to validate against validAttributes.
        validAttributes: dict
            The dict of attributes against which to validate attribute.

        Returns
        -------
        Tuple[list, list]
            Two arrays, one of attribute names and one of attribute indices.

        """
        # this kind of logic was needed in the series and results functions.
        # not sure if this is the best way, but it felt a bit DRYer to
        # put it into a function
        attributeArray: list[EnumMeta | str | int]
        if isinstance(attribute, (type(None), EnumMeta)):
            attributeArray = list(_enum_keys(validAttributes))
        elif isinstance(attribute, arrayish):
            attributeArray = list(attribute)
        elif isinstance(attribute, (int, str)):
            attributeArray = [attribute]
        else:
            raise ValueError(f"Error validating attribute {attribute!r}")

        # allow mixed input of attributes
        # accept string names, integers, or enum values in the same list
        attributeIndexArray = []
        for i, attrib in enumerate(attributeArray):
            if isinstance(attrib, Enum):
                attributeArray[i] = attrib.name.lower()
                attributeIndexArray.append(attrib)

            elif isinstance(attrib, (int, npint)):
                # will raise index error if not in range
                attribName = _enum_keys(validAttributes)[attrib]
                attributeArray[i] = attribName
                attributeIndexArray.append(_enum_get(validAttributes, attribName))

            elif isinstance(attrib, str):
                index = _enum_get(validAttributes, attrib)
                if index is None:
                    raise ValueError(
                        f"Attribute {attrib} not in valid attribute list: {_enum_keys(validAttributes)}"
                    )
                attributeIndexArray.append(index)
            else:
                raise TypeError(
                    f"Input type: {type(attrib)} not valid. Must be one of int, str, or Enum"
                )

        # attributeIndexArray = [validAttributes.get(atr, -1) for atr in attributeArray]

        return attributeArray, attributeIndexArray

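    # Illustrative note for _validateAttribute above: mixed inputs such as
    # ['rainfall', 1] resolve to the names ['rainfall', 'snow_depth'] plus the
    # matching members of the attribute enum (here subcatch_attributes).
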
    @staticmethod
    def _validateElement(
        element: int | str | Sequence[int | str] | None,
        validElements: Sequence[str],
    ) -> tuple[list[str], list[int]]:
        """
        Function to validate element arguments of element_series, element_attribute,
        and element_result functions.

        Parameters
        ----------
        element: Union[int, str, Sequence[Union[int, str]], None]
            The element name or index or None. If None, return all elements in
            validElements.
        validElements: Sequence[str]
            Tuple of elements against which to validate element.

        Returns
        -------
        Tuple[list, list]
            Two arrays, one of element names and one of element indices.

        """
        # this kind of logic was needed in the series and results functions
        # not sure if this is the best way, but it felt a bit DRYer to
        # put it into a function
        elementArray: list[str | int]
        if element is None:
            elementArray = list(validElements)
        elif isinstance(element, arrayish):
            elementArray = list(element)
        else:
            # ignore typing since types of this output list
            # are reconciled in the next loop. mypy was complaining.
            elementArray = [element]  # type: ignore

        elementIndexArray = []
        elemNameArray = []
        # allow mixed input of elements. string names can be mixed
        # with integer indices in the same input list
        for i, elem in enumerate(elementArray):
            if isinstance(elem, (int, npint)):
                # will raise index error if not in range
                elemName = validElements[elem]
                elemNameArray.append(elemName)
                elementIndexArray.append(elem)

            elif isinstance(elem, str):
                elementIndexArray.append(Output._elementIndex(elem, validElements, ""))
                elemNameArray.append(elem)
            else:
                raise TypeError(
                    f"Input type {type(elem)} not valid. Must be one of int, str"
                )

        return elemNameArray, elementIndexArray

    @staticmethod
    def _datetime_from_swmm(swmm_datetime):
        remaining_days = swmm_datetime % 1
        days = swmm_datetime - remaining_days
        seconds = remaining_days * 86400
        dt = datetime(year=1899, month=12, day=30) + timedelta(
            days=days, seconds=seconds
        )
        return dt

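    # Illustrative example of the epoch math in _datetime_from_swmm above
    # (SWMM stores datetimes as fractional days since 1899-12-30):
    #   Output._datetime_from_swmm(2.5) == datetime(1900, 1, 1, 12, 0)
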
    def _checkPollutantName(self, name: str) -> str:
        """Check pollutant name against existing attribute dicts.
        Rename and warn if existing attribute is duplicated.

        Parameters
        ----------
        name: str
            The name of pollutant.

        Returns
        -------
        str
            The validated name of pollutant.
        """

        elems = []
        if name.lower() in _enum_keys(self.subcatch_attributes):
            elems.append("subcatchment")

        if name.lower() in _enum_keys(self.node_attributes):
            elems.append("node")

        if name.lower() in _enum_keys(self.link_attributes):
            elems.append("link")

        if name.lower() in _enum_keys(self.system_attributes):
            elems.append("system")

        if len(elems) > 0:
            warnings.warn(
                f"Pollutant {name} is a duplicate of existing {','.join(elems)} attribute, renaming to pol_{name}"
            )
            return f"pol_{name}"

        return name

    def _open(self) -> bool:
        """Open a binary file.

        Parameters
        ----------

        Returns
        -------
        bool
            True if binary file was opened successfully.

        """
        if not os.path.exists(self._binfile):
            raise ValueError(f"Output file at: '{self._binfile}' does not exist")

        if self._handle is None:
            self._handle = output.init()

        if not self._loaded:
            self._loaded = True
            output.open(self._handle, self._binfile)
            self._start = self._datetime_from_swmm(output.get_start_date(self._handle))
            self._report = output.get_times(self._handle, shared_enum.Time.REPORT_STEP)
            self._period = output.get_times(self._handle, shared_enum.Time.NUM_PERIODS)
            self._end = self._start + timedelta(seconds=self._period * self._report)

        # load pollutants if not already loaded
        if not hasattr(self, "_pollutants"):
            # load pollutant data if it has not before
            total = self.project_size[4]
            self._pollutants = tuple(
                self._checkPollutantName(
                    self._objectName(shared_enum.ElementType.POLLUT, index).lower()
                )
                for index in range(total)
            )

            for i, nom in enumerate(self._pollutants):
                # extend enums to include pollutants
                extend_enum(self.subcatch_attributes, nom.upper(), 8 + i)
                extend_enum(self.node_attributes, nom.upper(), 6 + i)
                extend_enum(self.link_attributes, nom.upper(), 5 + i)

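        # For example, with pollutants ('groundwater', 'pol_rainfall', 'sewage') the
        # per-object enum copies above gain GROUNDWATER=8, POL_RAINFALL=9 and SEWAGE=10
        # on subcatch_attributes; node pollutant members start at 6 and link members at 5.
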
        if self._preload:
            # respos = output.get_positions(self._handle)[2]
            # self._close()

            subs = list(
                product(
                    ["sub"],
                    range(len(self.subcatchments)),
                    range(len(self.subcatch_attributes)),
                )
            )
            nodes = list(
                product(
                    ["node"],
                    range(len(self.nodes)),
                    range(len(self.node_attributes)),
                )
            )
            links = list(
                product(
                    ["link"],
                    range(len(self.links)),
                    range(len(self.link_attributes)),
                )
            )
            system = list(product(["sys"], ["sys"], range(len(self.system_attributes))))

            cols = subs + nodes + links + system
            cols.insert(0, ("datetime", 0, 0))

            idx = MultiIndex.from_tuples(cols)
            fmts = "f8" + ",f4" * (len(cols) - 1)

            with open(self._binfile, "rb") as fil:
                fil.seek(self._output_position, 0)
                dat = numpy.core.records.fromfile(fil, formats=fmts)
            self.data = DataFrame(dat)
            self.data.columns = idx

        return True

    def _close(self) -> bool:
        """Close an opened binary file.

        Parameters
        ----------

        Returns
        -------
        bool
            True if binary file was closed successfully.

        """
        if self._loaded:
            output.close(self._handle)
            self._loaded = False
            del self._handle
            self._handle = None
            self._delete_handle = True

        return True

    ###### outfile property getters ######
    @property
    def _output_position(self):
        if not hasattr(self, "__output_position"):
            with open(self._binfile, "rb") as fil:
                fil.seek(-4 * 4, SEEK_END)
                self.__output_position = struct.unpack("i", fil.read(4))[0]

        return self.__output_position

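    # Note on _output_position above: it caches the 4-byte integer stored 16 bytes
    # before the end of the .out file, which the preload branch of _open uses as the
    # byte offset where the computed results records begin.
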
    @property  # type: ignore
    @output_open_handler
    def report(self) -> int:
        """Return the reporting timestep in seconds.

        Returns
        -------
        int
            The reporting timestep in seconds.

        """
        return self._report

    @property  # type: ignore
    @output_open_handler
    def start(self) -> datetime:
        """Return the reporting start datetime.

        Returns
        -------
        datetime
            The reporting start datetime.

        """
        return self._start

    @property  # type: ignore
    @output_open_handler
    def end(self) -> datetime:
        """Return the reporting end datetime.

        Returns
        -------
        datetime
            The reporting end datetime.
        """
        return self._end

    @property  # type: ignore
    @output_open_handler
    def period(self) -> int:
        """Return the number of reporting timesteps in the binary output file.

        Returns
        -------
        int
            The number of reporting timesteps.
        """
        return self._period

    @property  # type: ignore
    def project_size(self) -> list[int]:
        """Returns the number of each model element type available in the binary output file
        in the following order:

        [subcatchment, node, link, system, pollutant]

        Returns
        -------
        list
            A list of numbers of each model type.

            [nSubcatchments, nNodes, nLinks, nSystems(1), nPollutants]

        """
        if not hasattr(self, "_project_size"):
            self._load_project_size()
        return self._project_size

    @output_open_handler
    def _load_project_size(self) -> None:
        """Load model size into self._project_size"""
        self._project_size = output.get_proj_size(self._handle)

    @property
    def pollutants(self) -> tuple[str, ...]:
        """Return a tuple of pollutants available in SWMM binary output file.

        Returns
        -------
        Tuple[str]
            A tuple of pollutant names.

        """

        # chose not to write a pollutant loader method
        # because loading such is kind of imperative to the functionality
        # of other data getter methods, which don't necessarily
        # call pollutants method. Instead, pollutant loading logic is
        # thrown in the _open() method, and this method calls open if
        # pollutants are not available.
        if self._pollutants is None:
            self._open()

        return self._pollutants

    @property  # type: ignore
    @output_open_handler
    def _unit(self) -> tuple[int]:
        """Return SWMM binary output file unit type from `swmm.toolkit.shared_enum.UnitSystem`.

        Returns
        -------
        Tuple[int]
            Tuple of integers indicating system units, flow units, and units for each pollutant.

        """
        return tuple(output.get_units(self._handle))  # type: ignore

    @property
    def units(self) -> list[str]:
        """Return SWMM binary output file unit type from `swmm.toolkit.shared_enum.UnitSystem`.

        Returns
        -------
        List[str]
            List of string names for system units, flow units, and units for each pollutant.

            Values returned are the names from swmm.toolkit.shared_enum:
            UnitSystem
            FlowUnits
            ConcUnits

        """
        return [
            shared_enum.UnitSystem(self._unit[0]).name,
            shared_enum.FlowUnits(self._unit[1]).name,
        ] + [shared_enum.ConcUnits(i).name for i in self._unit[2:]]

    @property  # type: ignore
    @output_open_handler
    def _version(self) -> int:
        """Return SWMM version used to generate SWMM binary output file results.

        Returns
        -------
        int
            Integer representation of SWMM version used to make output file.

        """
        return output.get_version(self._handle)

    @output_open_handler
    def _objectName(self, object_type: int, index: int) -> str:
        """Get object name from SWMM binary output file using object type and object index.

        Parameters
        ----------
        object_type: int
            The object type from swmm.toolkit.shared_enum.ElementType.
        index: int
            The object index.

        Returns
        -------
        str
            object name

        """
        return output.get_elem_name(self._handle, object_type, index)

    ##### timestep setters and getters #####
    def _time2step(
        self,
        dateTime: (
            None
            | str
            | int
            | datetime
            | Timestamp
            | datetime64
            | Sequence[str | int | datetime | Timestamp | datetime64]
        ),
        ifNone: int = 0,
        method: str = "nearest",
    ) -> list[int]:
        """Convert datetime value to SWMM timestep index. By default, this returns the nearest
        timestep to the requested date, so it will always return a time index available in the
        binary output file.

        Parameters
        ----------
        dateTime: datetime-like or string or sequence of such
            datetime to convert. Must be a datetime-like object or convertible
            with `pd.to_datetime`.

        ifNone: int
            The value to return if dateTime is None, defaults to 0.

        method: str
            The method name to pass to pandas `get_indexer`_, defaults to "nearest".

            .. _get_indexer: https://pandas.pydata.org/docs/reference/api/pandas.Index.get_indexer.html

        Returns
        -------
        Union[int, np.ndarray]
            SWMM model time step or array of time steps

        """
        if dateTime is None:
            return [ifNone]

        dt = asarray(dateTime).flatten()

        # if passing swmm time step, no indexing necessary
        if dt.dtype in (float, int):
            return dt.astype(int).tolist()

        # ensure datetime value
        dt = to_datetime(dt)
        return self.timeIndex.get_indexer(dt, method=method).tolist()

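    # Illustrative example for _time2step above: with the 5-minute report step used in
    # the docstring examples (series starting 1900-01-01 00:05:00), an input such as
    # '1900-01-01 00:09:00' maps to [1], the index of the nearest available step (00:10:00).
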
    @property
    def timeIndex(self) -> DatetimeIndex:
        """Returns DatetimeIndex of reporting timeseries in binary output file.

        Returns
        -------
        pd.DatetimeIndex
            A pandas `DatetimeIndex`_ for each reporting timestep.

            .. _DatetimeIndex: https://pandas.pydata.org/docs/reference/api/pandas.DatetimeIndex.html?highlight=datetimeindex#pandas.DatetimeIndex

        """
        if not hasattr(self, "_timeIndex"):
            self._load_timeIndex()
        return self._timeIndex

    @output_open_handler
    def _load_timeIndex(self) -> None:
        """Load model reporting times into self._timeIndex"""
        self._timeIndex = DatetimeIndex(
            [
                self._start + timedelta(seconds=self._report) * step
                for step in range(1, self._period + 1)
            ]
        )

    ##### model element setters and getters #####
    def _subcatchmentIndex(
        self, subcatchment: str | int | Sequence[str | int] | None
    ) -> list[int] | int:
        """Get the swmm index for subcatchment.

        Parameters
        ----------
        subcatchment: Union[str, int, Sequence[Union[str, int]]]
            The name(s) of subcatchment(s).

        Returns
        -------
        Union[List[int], int]
            The SWMM index(s) of subcatchment(s).

        """

        if isinstance(subcatchment, (str, int, type(None))):
            return self._elementIndex(subcatchment, self.subcatchments, "subcatchment")

        elif subcatchment is not None:
            return [
                self._elementIndex(sub, self.subcatchments, "subcatchment")
                for sub in subcatchment
            ]
        else:
            raise TypeError("Invalid type for _subcatchmentIndex argument")

    @property
    def subcatchments(self) -> tuple[str, ...]:
        """Return a tuple of subcatchments available in SWMM output binary file.

        Returns
        -------
        Tuple[str]
            A tuple of model subcatchment names.

        """
        if not hasattr(self, "_subcatchments"):
            self._load_subcatchments()
        return self._subcatchments

    @output_open_handler
    def _load_subcatchments(self) -> None:
        """Load model subcatchments into self._subcatchments"""
        total = self.project_size[0]

        self._subcatchments = tuple(
            self._objectName(shared_enum.ElementType.SUBCATCH, index)
            for index in range(total)
        )

    def _nodeIndex(
        self, node: str | int | Sequence[str | int] | None
    ) -> list[int] | int:
        """Get the swmm index for node.

        Parameters
        ----------
        node: Union[str, int, Sequence[Union[str, int]]]
            The name(s) of node(s)

        Returns
        -------
        Union[List[int], int]
            The SWMM index(s) of node(s).

        """

        if isinstance(node, (str, int, type(None))):
            return self._elementIndex(node, self.nodes, "node")

        # elif here because mypy issues
        elif node is not None:
            return [self._elementIndex(nd, self.nodes, "node") for nd in node]

        else:
            raise TypeError("Invalid type for self._nodeIndex argument")

    @property
    def nodes(self) -> tuple[str, ...]:
        """Return a tuple of nodes available in SWMM binary output file.

        Returns
        -------
        Tuple[str]
            A tuple of model node names.

        """
        if not hasattr(self, "_nodes"):
            self._load_nodes()
        return self._nodes

    @output_open_handler
    def _load_nodes(self) -> None:
        """Load model nodes into self._nodes"""
        total = self.project_size[1]

        self._nodes = tuple(
            self._objectName(shared_enum.ElementType.NODE, index)
            for index in range(total)
        )

    def _linkIndex(
        self, link: str | int | Sequence[str | int] | None
    ) -> list[int] | int:
        """Get the swmm index for link.

        Parameters
        ----------
        link: Union[str, int, Sequence[Union[str, int]]]
            The name(s) of link(s)

        Returns
        -------
        Union[List[int], int]
            SWMM index(s) of link(s).

        """
        if isinstance(link, (str, int, type(None))):
            return self._elementIndex(link, self.links, "link")

        # elif here because mypy issues
        elif link is not None:
            return [self._elementIndex(lnk, self.links, "link") for lnk in link]

        else:
            raise TypeError("Invalid type for self._linkIndex argument")

    @property
    def links(self) -> tuple[str, ...]:
        """Return a tuple of links available in SWMM binary output file.

        Returns
        -------
        Tuple[str]
            A tuple of model link names.

        """
        if not hasattr(self, "_links"):
            self._load_links()
        return self._links

    @output_open_handler
    def _load_links(self) -> None:
        """Load model links into self._links"""
        total = self.project_size[2]

        self._links = tuple(
            self._objectName(shared_enum.ElementType.LINK, index)
            for index in range(total)
        )

    ####### series getters #######

    def _memory_series_getter(self, elemType: str) -> Callable:
        if elemType == "sys":

            def getter(  # type: ignore
                _handle, Attr: EnumMeta, startIndex: int, endIndex: int
            ) -> ndarray:
                # col = f"{type};{type};{Attr.value}"
                # return self.data[col][startIndex:endIndex]
                return self.data.loc[
                    startIndex : endIndex - 1,  # type: ignore
                    IndexSlice[elemType, elemType, Attr.value],  # type: ignore
                ].to_numpy()

        else:

            def getter(  # type: ignore
                _handle, elemIdx: int, Attr: EnumMeta, startIndex: int, endIndex: int
            ) -> ndarray:
                # col = f"{type};{elemIdx};{Attr.value}"
                # return self.data[col][startIndex:endIndex]
                return self.data.loc[
                    startIndex : endIndex - 1, IndexSlice[elemType, elemIdx, Attr.value]  # type: ignore
                ].to_numpy()

        return getter

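    # Note on _memory_series_getter above: the returned closures mirror the signatures of
    # the swmm.toolkit.output series getters (e.g. output.get_node_series) so that, when
    # preload=True, they can be swapped in as the `getterFunc` passed to _model_series.
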
    def _model_series(
        self,
        elementIndexArray: list[int],
        attributeIndexArray: list[EnumMeta],
        startIndex: int,
        endIndex: int,
        columns: str | None,
        getterFunc: Callable,
    ) -> ndarray:
        """
        Base series getter for any attribute. The function consolidates the logic
        necessary to build long or wide timeseries dataframes for each type of swmm
        model element.

        Parameters
        ----------
        elementIndexArray: List[int]
            Array of SWMM model element indices
        attributeIndexArray: List[enum]
            Array of attribute Enums to pull for each element
        startIndex: int
            SWMM simulation time index to start timeseries
        endIndex: int
            SWMM simulation time index to end timeseries
        columns: Optional[str]
            Decide whether or not to break out elements or attributes as columns. May be one of:

            None: Return long-form data with one column for each data point

            'elem': Return data with a column for each element. If more than one attribute are given, attribute names are added to the index.

            'attr': Return data with a column for each attribute. If more than one element are given, element names are added to the index.

        getterFunc: Callable
            The swmm.toolkit series getter function. Should be one of:

            swmm.toolkit.output.get_subcatch_series
            swmm.toolkit.output.get_node_series
            swmm.toolkit.output.get_link_series

        Returns
        -------
        np.ndarray
            array of SWMM timeseries results

        Raises
        ------
        ValueError
            Value error if columns is not one of "elem", "attr", or None
        """

        if columns not in ("elem", "attr", None):
            raise ValueError(
                f"columns must be one of 'elem','attr', or None. {columns} was given"
            )

        if columns is None:
            return concatenate(
                [
                    concatenate(
                        [
                            getterFunc(
                                self._handle, elemIdx, Attr, startIndex, endIndex
                            )
                            for Attr in attributeIndexArray
                        ],
                        axis=0,
                    )
                    for elemIdx in elementIndexArray
                ],
                axis=0,
            )

        elif columns.lower() == "attr":
            return concatenate(
                [
                    stack(
                        [
                            getterFunc(
                                self._handle, elemIdx, Attr, startIndex, endIndex
                            )
                            for Attr in attributeIndexArray
                        ],
                        axis=1,
                    )
                    for elemIdx in elementIndexArray
                ],
                axis=0,
            )

        elif columns.lower() == "elem":
            return concatenate(
                [
                    stack(
                        [
                            getterFunc(
                                self._handle, elemIdx, Attr, startIndex, endIndex
                            )
                            for elemIdx in elementIndexArray
                        ],
                        axis=1,
                    )
                    for Attr in attributeIndexArray
                ],
                axis=0,
            )
        else:
            raise Exception("Columns must be None, 'attr', or 'elem'")

    def _model_series_index(
        self,
        elementArray: list[str],
        attributeArray: list[str],
        startIndex: int,
        endIndex: int,
        columns: str | None,
    ) -> tuple:
        """
        Base dataframe index getter for model timeseries. The function consolidates the logic
        necessary to build a data frame index for long or wide dataframes built with time series
        getters.

        Parameters
        ----------
        elementArray: List[str]
            Array of SWMM model element names
        attributeArray: List[str]
            Array of attribute names pulled for each element
        startIndex: int
            SWMM simulation time index to start timeseries
        endIndex: int
            SWMM simulation time index to end timeseries
        columns: Optional[str]
            Decide whether or not to break out elements or attributes as columns. May be one of:

            None: Return long-form data with one column for each data point

            'elem': Return data with a column for each element. If more than one attribute are given, attribute names are added to the index.

            'attr': Return data with a column for each attribute. If more than one element are given, element names are added to the index.

        Returns
        -------
        (pd.MultiIndex, Union[list,np.ndarray])
            A pandas MultiIndex for the row indices and an iterable of column names

        Raises
        ------
        ValueError
            Value error if columns is not one of "elem", "attr", or None

        """

        if columns not in ("elem", "attr", None):
            raise ValueError(
                f"columns must be one of 'elem','attr', or None. {columns} was given"
            )

        if columns is None:
            dtIndex = tile(
                self.timeIndex[startIndex:endIndex],
                len(elementArray) * len(attributeArray),
            )
            indexArrays = [dtIndex]
            names = ["datetime"]
            cols = ["result"]
            if len(elementArray) > 1:
                indexArrays.append(
                    asarray(elementArray).repeat(
                        (endIndex - startIndex) * len(attributeArray)
                    )
                )
                names.append("element")
            if len(attributeArray) > 1:
                indexArrays.append(
                    tile(asarray(attributeArray), len(elementArray)).repeat(
                        endIndex - startIndex
                    )
                )
                names.append("attribute")

        elif columns.lower() == "attr":
            dtIndex = tile(self.timeIndex[startIndex:endIndex], len(elementArray))
            indexArrays = [dtIndex]
            names = ["datetime"]
            cols = attributeArray
            if len(elementArray) > 1:
                indexArrays.append(asarray(elementArray).repeat(endIndex - startIndex))
                names.append("element")

        elif columns.lower() == "elem":
            dtIndex = tile(self.timeIndex[startIndex:endIndex], len(attributeArray))
            indexArrays = [dtIndex]
            names = ["datetime"]
            cols = elementArray

            if len(attributeArray) > 1:
                indexArrays.append(
                    asarray(attributeArray).repeat(endIndex - startIndex)
                )
                names.append("attribute")
        index = (
            MultiIndex.from_arrays(
                indexArrays,
                names=names,
            )
            if len(indexArrays) > 1
            else Index(indexArrays[0], name=names[0])
        )

        return index, cols

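    # Shape note for the two helpers above (long-form, columns=None case): _model_series
    # returns values ordered element-major (all attributes for the first element, then the
    # next element, each over the requested time range), and _model_series_index builds its
    # datetime/element/attribute arrays in the same order, so the public series methods can
    # wrap the pair directly in a DataFrame.
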
    def subcatch_series(
        self,
        subcatchment: int | str | Sequence[int | str] | None,
        attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = (
            "rainfall",
            "runoff_rate",
            "gw_outflow_rate",
        ),
        start: str | int | datetime | None = None,
        end: str | int | datetime | None = None,
        columns: str | None = "attr",
        asframe: bool = True,
    ) -> DataFrame | ndarray:
        """Get one or more time series for one or more subcatchment attributes.
        Specify series start index and end index to get desired time range.

        Parameters
        ----------
        subcatchment: Union[int, str, Sequence[Union[int, str]], None]
            The subcatchment index or name.

        attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None,
            The attribute index or name.

            One of:

            **rainfall, snow_depth, evap_loss, infil_loss, runoff_rate, gw_outflow_rate,
            gw_table_elev, soil_moisture**.

            Defaults to: `('rainfall', 'runoff_rate', 'gw_outflow_rate').`

            Can also input the integer index of the attribute you would like to
            pull or the actual enum from Output.subcatch_attributes.

            Setting to None indicates all attributes.

        start: Union[str, int, datetime, None], optional
            The start datetime or index from which to return series, defaults to None.

            Setting to None indicates simulation start.

        end: Union[str, int, datetime, None], optional
            The end datetime or index from which to return series, defaults to None.

            Setting to None indicates simulation end.

        columns: Optional[str], optional
            Decide whether or not to break out elements or attributes as columns. May be one of:

            None: Return long-form data with one column for each data point

            'elem': Return data with a column for each element. If more than one attribute are given, attribute names are added to the index.

            'attr': Return data with a column for each attribute. If more than one element are given, element names are added to the index.

            defaults to 'attr'.

        asframe: bool
            A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.

        Returns
        -------
        Union[pd.DataFrame, np.ndarray]
            A DataFrame or ndarray of attribute values in each column for requested
            date range and subcatchments.

        Examples
        ---------

        Pull single time series for a single subcatchment

        >>> from swmm.pandas import Output,test_out_path
        >>> out = Output(test_out_path)
        >>> out.subcatch_series('SUB1', 'runoff_rate')
                             runoff_rate
        datetime
        1900-01-01 00:05:00     0.000000
        1900-01-01 00:10:00     0.000000
        1900-01-01 00:15:00     0.000000
        1900-01-01 00:20:00     0.000000
        1900-01-01 00:25:00     0.000000
        ...                          ...
        1900-01-01 23:40:00     0.025057
        1900-01-01 23:45:00     0.025057
        1900-01-01 23:50:00     0.025057
        1900-01-01 23:55:00     0.025057
        1900-01-02 00:00:00     0.025057
        [288 rows x 1 columns]

        Pull a wide-form dataframe for all parameters for a catchment

        >>> out.subcatch_series('SUB1', out.subcatch_attributes)
                             rainfall  snow_depth  evap_loss  infil_loss  ...  soil_moisture  groundwater  pol_rainfall  sewage
        datetime                                                          ...
        1900-01-01 00:05:00   0.03000         0.0        0.0    0.020820  ...       0.276035          0.0           0.0     0.0
        1900-01-01 00:10:00   0.03000         0.0        0.0    0.020952  ...       0.276053          0.0           0.0     0.0
        1900-01-01 00:15:00   0.03000         0.0        0.0    0.021107  ...       0.276071          0.0           0.0     0.0
        1900-01-01 00:20:00   0.03000         0.0        0.0    0.021260  ...       0.276089          0.0           0.0     0.0
        1900-01-01 00:25:00   0.03000         0.0        0.0    0.021397  ...       0.276107          0.0           0.0     0.0
        ...                       ...         ...        ...         ...  ...            ...          ...           ...     ...
        1900-01-01 23:40:00   0.03224         0.0        0.0    0.027270  ...       0.280026          0.0         100.0     0.0
        1900-01-01 23:45:00   0.03224         0.0        0.0    0.027270  ...       0.280026          0.0         100.0     0.0
        1900-01-01 23:50:00   0.03224         0.0        0.0    0.027270  ...       0.280026          0.0         100.0     0.0
        1900-01-01 23:55:00   0.03224         0.0        0.0    0.027270  ...       0.280026          0.0         100.0     0.0
        1900-01-02 00:00:00   0.00000         0.0        0.0    0.027270  ...       0.280026          0.0         100.0     0.0
        [288 rows x 11 columns]

        Pull a long-form dataframe of all catchments and attributes

        >>> out.subcatch_series(out.subcatchments, out.subcatch_attributes, columns=None)
                                                result
        datetime            element attribute
        1900-01-01 00:05:00 SUB1    rainfall      0.03
        1900-01-01 00:10:00 SUB1    rainfall      0.03
        1900-01-01 00:15:00 SUB1    rainfall      0.03
        1900-01-01 00:20:00 SUB1    rainfall      0.03
        1900-01-01 00:25:00 SUB1    rainfall      0.03
        ...                                        ...
        1900-01-01 23:40:00 SUB3    sewage        0.00
        1900-01-01 23:45:00 SUB3    sewage        0.00
        1900-01-01 23:50:00 SUB3    sewage        0.00
        1900-01-01 23:55:00 SUB3    sewage        0.00
        1900-01-02 00:00:00 SUB3    sewage        0.00
        [9504 rows x 1 columns]

        Pull two parameters for one subcatchment and plot the results

        .. plot::

            import matplotlib.pyplot as plt
            from matplotlib.dates import DateFormatter
            from swmm.pandas import Output,test_out_path

            # read output file in Output object
            out = Output(test_out_path)

            # pull rainfall and runoff_rate timeseries and plot them
            ax = out.subcatch_series('SUB1', ['rainfall', 'runoff_rate']).plot(figsize=(8,4))
            plt.title("SUB1 Params")
            plt.tight_layout()
            plt.show()

        Pull one parameter for all subcatchments

        .. plot::

            import matplotlib.pyplot as plt
            from matplotlib.dates import DateFormatter
            from swmm.pandas import Output,test_out_path

            # read output file in Output object
            out = Output(test_out_path)

            # pull runoff_rate timeseries for all catchments and plot them
            ax = out.subcatch_series(out.subcatchments, 'runoff_rate', columns='elem').plot(figsize=(8,4))
            plt.title("Runoff Rate")
            plt.tight_layout()
            plt.show()

        """
        subcatchementArray, subcatchmentIndexArray = self._validateElement(
            subcatchment, self.subcatchments
        )

        attributeArray, attributeIndexArray = self._validateAttribute(
            attribute, self.subcatch_attributes
        )

        startIndex = self._time2step(start, 0)[0]
        endIndex = self._time2step(end, self._period)[0]

        getter = (
            self._memory_series_getter("sub")
            if self._preload
            else output.get_subcatch_series
        )

        values = self._model_series(
            subcatchmentIndexArray,
            attributeIndexArray,
            startIndex,
            endIndex,
            columns,
            getter,
        )

        if not asframe:
            return values

        dfIndex, cols = self._model_series_index(
            subcatchementArray, attributeArray, startIndex, endIndex, columns
        )
        return DataFrame(values, index=dfIndex, columns=cols)

@output_open_handler
|
|
1356
|
+
def node_series(
|
|
1357
|
+
self,
|
|
1358
|
+
node: int | str | Sequence[int | str] | None,
|
|
1359
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = (
|
|
1360
|
+
"invert_depth",
|
|
1361
|
+
"flooding_losses",
|
|
1362
|
+
"total_inflow",
|
|
1363
|
+
),
|
|
1364
|
+
start: str | int | datetime | None = None,
|
|
1365
|
+
end: str | int | datetime | None = None,
|
|
1366
|
+
columns: str | None = "attr",
|
|
1367
|
+
asframe: bool = True,
|
|
1368
|
+
) -> DataFrame | ndarray:
|
|
1369
|
+
"""Get one or more time series for one or more node attributes.
|
|
1370
|
+
Specify series start index and end index to get desired time range.
|
|
1371
|
+
|
|
1372
|
+
Parameters
|
|
1373
|
+
----------
|
|
1374
|
+
node: Union[int, str, Sequence[Union[int, str]], None]
|
|
1375
|
+
The node index or name.
|
|
1376
|
+
|
|
1377
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None,
|
|
1378
|
+
The attribute index or name.
|
|
1379
|
+
|
|
1380
|
+
On of:
|
|
1381
|
+
|
|
1382
|
+
**invert_depth, hydraulic_head, ponded_volume, lateral_inflow,
|
|
1383
|
+
total_inflow, flooding_losses**.
|
|
1384
|
+
|
|
1385
|
+
defaults to: `('invert_depth','flooding_losses','total_inflow')`
|
|
1386
|
+
|
|
1387
|
+
Can also input the integer index of the attribute you would like to
|
|
1388
|
+
pull or the actual enum from Output.node_attributes.
|
|
1389
|
+
|
|
1390
|
+
Setting to None indicates all attributes.
|
|
1391
|
+
|
|
1392
|
+
start: Union[str, int, datetime, None], optional
|
|
1393
|
+
The start datetime or index of from which to return series, defaults to None.
|
|
1394
|
+
|
|
1395
|
+
Setting to None indicates simulation start.
|
|
1396
|
+
|
|
1397
|
+
end: Union[str, int, datetime, None], optional
|
|
1398
|
+
The end datetime or index of from which to return series, defaults to None.
|
|
1399
|
+
|
|
1400
|
+
Setting to None indicates simulation end.
|
|
1401
|
+
|
|
1402
|
+
columns: Optional[str], optional
|
|
1403
|
+
Decide whether or not to break out elements or attributes as columns. May be one of:
|
|
1404
|
+
|
|
1405
|
+
None: Return long-form data with one column for each data point
|
|
1406
|
+
|
|
1407
|
+
'elem': Return data with a column for each element. If more than one attribute are given, attribute names are added to the index.
|
|
1408
|
+
|
|
1409
|
+
'attr': Return data with a column for each attribute. If more than one element are given, element names are added to the index.
|
|
1410
|
+
|
|
1411
|
+
defaults to 'attr'.
|
|
1412
|
+
|
|
1413
|
+
asframe: bool
|
|
1414
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
1415
|
+
|
|
1416
|
+
Returns
|
|
1417
|
+
-------
|
|
1418
|
+
Union[pd.DataFrame,np.ndarray]
|
|
1419
|
+
A DataFrame or ndarray of attribute values in each column for requested
|
|
1420
|
+
date range and nodes.
|
|
1421
|
+
|
|
1422
|
+
Examples
|
|
1423
|
+
---------
|
|
1424
|
+
|
|
1425
|
+
Pull single time series for a single node
|
|
1426
|
+
|
|
1427
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
1428
|
+
>>> out = Output(test_out_path)
|
|
1429
|
+
>>> out.node_series('JUNC2', 'invert_depth')
|
|
1430
|
+
invert_depth
|
|
1431
|
+
datetime
|
|
1432
|
+
1900-01-01 00:05:00 0.334742
|
|
1433
|
+
1900-01-01 00:10:00 0.509440
|
|
1434
|
+
1900-01-01 00:15:00 0.562722
|
|
1435
|
+
1900-01-01 00:20:00 0.602668
|
|
1436
|
+
1900-01-01 00:25:00 0.631424
|
|
1437
|
+
... ...
|
|
1438
|
+
1900-01-01 23:40:00 0.766949
|
|
1439
|
+
1900-01-01 23:45:00 0.766949
|
|
1440
|
+
1900-01-01 23:50:00 0.766949
|
|
1441
|
+
1900-01-01 23:55:00 0.766949
|
|
1442
|
+
1900-01-02 00:00:00 0.766949
|
|
1443
|
+
[288 rows x 1 columns]
|
|
1444
|
+
|
|
1445
|
+
Pull a wide-form dataframe for all parameters for a node
|
|
1446
|
+
|
|
1447
|
+
>>> out.node_series('JUNC2', out.node_attributes)
|
|
1448
|
+
invert_depth hydraulic_head ponded_volume lateral_inflow total_inflow flooding_losses groundwater pol_rainfall sewage
|
|
1449
|
+
datetime
|
|
1450
|
+
1900-01-01 00:05:00 0.334742 -0.705258 0.0 0.185754 0.185785 0.0 3.935642 0.000000 95.884094
|
|
1451
|
+
1900-01-01 00:10:00 0.509440 -0.530560 0.0 0.196764 0.197044 0.0 8.902034 0.000000 90.335831
|
|
1452
|
+
1900-01-01 00:15:00 0.562722 -0.477278 0.0 0.198615 0.199436 0.0 9.038609 0.000000 89.253334
|
|
1453
|
+
1900-01-01 00:20:00 0.602668 -0.437332 0.0 0.200802 0.202462 0.0 9.259741 0.000000 87.919571
|
|
1454
|
+
1900-01-01 00:25:00 0.631424 -0.408576 0.0 0.203108 0.205802 0.0 9.523322 0.000000 86.492836
|
|
1455
|
+
... ... ... ... ... ... ... ... ... ...
|
|
1456
|
+
1900-01-01 23:40:00 0.766949 -0.273052 0.0 0.314470 0.352183 0.0 15.293419 39.303375 45.430920
|
|
1457
|
+
1900-01-01 23:45:00 0.766949 -0.273052 0.0 0.314499 0.352183 0.0 15.313400 39.292118 45.430920
|
|
1458
|
+
1900-01-01 23:50:00 0.766949 -0.273052 0.0 0.314530 0.352183 0.0 15.333243 39.281300 45.430920
|
|
1459
|
+
1900-01-01 23:55:00 0.766949 -0.273052 0.0 0.314559 0.352183 0.0 15.352408 39.271194 45.430920
|
|
1460
|
+
1900-01-02 00:00:00 0.766949 -0.273052 0.0 0.314590 0.352183 0.0 15.371475 39.261478 45.430920
|
|
1461
|
+
[288 rows x 9 columns]
|
|
1462
|
+
|
|
1463
|
+
Pull a long-form dataframe of all nodes and attributes
|
|
1464
|
+
|
|
1465
|
+
>>> out.node_series('JUNC2', out.node_attributes, columns=None)
|
|
1466
|
+
result
|
|
1467
|
+
datetime element attribute
|
|
1468
|
+
1900-01-01 00:05:00 JUNC1 invert_depth 0.002143
|
|
1469
|
+
1900-01-01 00:10:00 JUNC1 invert_depth 0.010006
|
|
1470
|
+
1900-01-01 00:15:00 JUNC1 invert_depth 0.017985
|
|
1471
|
+
1900-01-01 00:20:00 JUNC1 invert_depth 0.025063
|
|
1472
|
+
1900-01-01 00:25:00 JUNC1 invert_depth 0.031329
|
|
1473
|
+
... ...
|
|
1474
|
+
1900-01-01 23:40:00 STOR1 sewage 51.502193
|
|
1475
|
+
1900-01-01 23:45:00 STOR1 sewage 51.164684
|
|
1476
|
+
1900-01-01 23:50:00 STOR1 sewage 50.905445
|
|
1477
|
+
1900-01-01 23:55:00 STOR1 sewage 50.715385
|
|
1478
|
+
1900-01-02 00:00:00 STOR1 sewage 50.574486
|
|
1479
|
+
[23328 rows x 1 columns]
|
|
1480
|
+
|
|
1481
|
+
Pull flow timeseries and calculate the total flow volume for all nodes
|
|
1482
|
+
|
|
1483
|
+
|
|
1484
|
+
>>> from swmm.pandas.constants import gal_per_cf
|
|
1485
|
+
>>> df = out.node_series(out.nodes, ['lateral_inflow','total_inflow','flooding_losses'])
|
|
1486
|
+
lateral_inflow total_inflow flooding_losses
|
|
1487
|
+
datetime element
|
|
1488
|
+
1900-01-01 00:05:00 JUNC1 0.002362 0.002362 0.0
|
|
1489
|
+
1900-01-01 00:10:00 JUNC1 0.005792 0.005792 0.0
|
|
1490
|
+
1900-01-01 00:15:00 JUNC1 0.006524 0.006524 0.0
|
|
1491
|
+
1900-01-01 00:20:00 JUNC1 0.007306 0.007306 0.0
|
|
1492
|
+
1900-01-01 00:25:00 JUNC1 0.008039 0.008039 0.0
|
|
1493
|
+
... ... ... ...
|
|
1494
|
+
1900-01-01 23:40:00 STOR1 0.000000 1.455056 0.0
|
|
1495
|
+
1900-01-01 23:45:00 STOR1 0.000000 1.455056 0.0
|
|
1496
|
+
1900-01-01 23:50:00 STOR1 0.000000 1.455056 0.0
|
|
1497
|
+
1900-01-01 23:55:00 STOR1 0.000000 1.455056 0.0
|
|
1498
|
+
1900-01-02 00:00:00 STOR1 0.000000 1.455056 0.0
|
|
1499
|
+
[2592 rows x 3 columns]
|
|
1500
|
+
#----------------------------------------------------------------------------
|
|
1501
|
+
# group by element name and sum,
|
|
1502
|
+
# then multiply by reporting timestep in seconds
|
|
1503
|
+
# then convert to millions of gallons
|
|
1504
|
+
>>> df.groupby('element').sum() * out.report * gal_per_cf / 1e6
|
|
1505
|
+
lateral_inflow total_inflow flooding_losses
|
|
1506
|
+
element
|
|
1507
|
+
JUNC1 0.101562 0.101898 0.000053
|
|
1508
|
+
JUNC2 0.544891 0.857012 0.000000
|
|
1509
|
+
JUNC3 0.000000 0.502078 0.080634
|
|
1510
|
+
JUNC4 1.813826 2.096243 0.317929
|
|
1511
|
+
JUNC5 0.000000 1.870291 0.073878
|
|
1512
|
+
JUNC6 0.000000 1.701455 0.000000
|
|
1513
|
+
OUT1 0.000000 1.698081 0.000000
|
|
1514
|
+
OUT2 0.000000 0.575617 0.000000
|
|
1515
|
+
STOR1 0.000000 1.862843 0.172482
|
|
1516
|
+
"""
|
|
1517
|
+
nodeArray, nodeIndexArray = self._validateElement(node, self.nodes)
|
|
1518
|
+
|
|
1519
|
+
attributeArray, attributeIndexArray = self._validateAttribute(
|
|
1520
|
+
attribute, self.node_attributes
|
|
1521
|
+
)
|
|
1522
|
+
|
|
1523
|
+
startIndex = self._time2step(start, 0)[0]
|
|
1524
|
+
endIndex = self._time2step(end, self._period)[0]
|
|
1525
|
+
|
|
1526
|
+
getter = (
|
|
1527
|
+
self._memory_series_getter("node")
|
|
1528
|
+
if self._preload
|
|
1529
|
+
else output.get_node_series
|
|
1530
|
+
)
|
|
1531
|
+
|
|
1532
|
+
values = self._model_series(
|
|
1533
|
+
nodeIndexArray,
|
|
1534
|
+
attributeIndexArray,
|
|
1535
|
+
startIndex,
|
|
1536
|
+
endIndex,
|
|
1537
|
+
columns,
|
|
1538
|
+
getter,
|
|
1539
|
+
)
|
|
1540
|
+
|
|
1541
|
+
if not asframe:
|
|
1542
|
+
return values
|
|
1543
|
+
|
|
1544
|
+
dfIndex, cols = self._model_series_index(
|
|
1545
|
+
nodeArray, attributeArray, startIndex, endIndex, columns
|
|
1546
|
+
)
|
|
1547
|
+
|
|
1548
|
+
return DataFrame(values, index=dfIndex, columns=cols)
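# A minimal usage sketch (not part of the original source): the same query with
# and without the `asframe` switch. Element and attribute names are taken from
# the docstring examples above.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    df = out.node_series("JUNC2", "invert_depth")                  # indexed DataFrame
    arr = out.node_series("JUNC2", "invert_depth", asframe=False)  # bare ndarray, no index built
    print(df.shape, arr.shape)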
|
|
1549
|
+
|
|
1550
|
+
@output_open_handler
|
|
1551
|
+
def link_series(
|
|
1552
|
+
self,
|
|
1553
|
+
link: int | str | Sequence[int | str] | None,
|
|
1554
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = (
|
|
1555
|
+
"flow_rate",
|
|
1556
|
+
"flow_velocity",
|
|
1557
|
+
"flow_depth",
|
|
1558
|
+
),
|
|
1559
|
+
start: int | str | datetime | None = None,
|
|
1560
|
+
end: int | str | datetime | None = None,
|
|
1561
|
+
columns: str | None = "attr",
|
|
1562
|
+
asframe: bool = True,
|
|
1563
|
+
) -> DataFrame | ndarray:
|
|
1564
|
+
"""Get one or more time series for one or more link attributes.
|
|
1565
|
+
Specify series start index and end index to get desired time range.
|
|
1566
|
+
|
|
1567
|
+
Parameters
|
|
1568
|
+
----------
|
|
1569
|
+
link: Union[int, str, Sequence[Union[int, str]], None]
|
|
1570
|
+
The link index or name.
|
|
1571
|
+
|
|
1572
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None
|
|
1573
|
+
The attribute index or name.
|
|
1574
|
+
|
|
1575
|
+
One of:
|
|
1576
|
+
|
|
1577
|
+
**flow_rate, flow_depth, flow_velocity, flow_volume, capacity**.
|
|
1578
|
+
|
|
1579
|
+
defaults to: `('flow_rate','flow_velocity','flow_depth')`
|
|
1580
|
+
|
|
1581
|
+
Can also input the integer index of the attribute you would like to
|
|
1582
|
+
pull or the actual enum from Output.link_attributes.
|
|
1583
|
+
|
|
1584
|
+
Setting to None indicates all attributes.
|
|
1585
|
+
|
|
1586
|
+
start: Union[str, int, datetime, None], optional
|
|
1587
|
+
The start datetime or index from which to return the series, defaults to None.
|
|
1588
|
+
|
|
1589
|
+
Setting to None indicates simulation start.
|
|
1590
|
+
|
|
1591
|
+
end: Union[str, int, datetime, None], optional
|
|
1592
|
+
The end datetime or index up to which to return the series, defaults to None.
|
|
1593
|
+
|
|
1594
|
+
Setting to None indicates simulation end.
|
|
1595
|
+
|
|
1596
|
+
columns: Optional[str], optional
|
|
1597
|
+
Decide whether or not to break out elements or attributes as columns. May be one of:
|
|
1598
|
+
|
|
1599
|
+
None: Return long-form data with a single result column and a row for each element, attribute, and time step.
|
|
1600
|
+
|
|
1601
|
+
'elem': Return data with a column for each element. If more than one attribute is given, attribute names are added to the index.
|
|
1602
|
+
|
|
1603
|
+
'attr': Return data with a column for each attribute. If more than one element is given, element names are added to the index.
|
|
1604
|
+
|
|
1605
|
+
defaults to 'attr'.
|
|
1606
|
+
|
|
1607
|
+
asframe: bool
|
|
1608
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
1609
|
+
|
|
1610
|
+
Returns
|
|
1611
|
+
-------
|
|
1612
|
+
Union[pd.DataFrame,np.ndarray]
|
|
1613
|
+
A DataFrame or ndarray of attribute values in each column for requested
|
|
1614
|
+
date range and links.
|
|
1615
|
+
|
|
1616
|
+
Examples
|
|
1617
|
+
---------
|
|
1618
|
+
|
|
1619
|
+
Pull flow rate for two conduits
|
|
1620
|
+
|
|
1621
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
1622
|
+
>>> out = Output(test_out_path)
|
|
1623
|
+
>>> out.link_series(['COND1','COND6'],out.link_attributes.FLOW_RATE,columns='elem')
|
|
1624
|
+
COND1 COND6
|
|
1625
|
+
datetime
|
|
1626
|
+
1900-01-01 00:05:00 0.000031 0.0000
|
|
1627
|
+
1900-01-01 00:10:00 0.000280 0.0000
|
|
1628
|
+
1900-01-01 00:15:00 0.000820 0.0000
|
|
1629
|
+
1900-01-01 00:20:00 0.001660 0.0000
|
|
1630
|
+
1900-01-01 00:25:00 0.002694 0.0000
|
|
1631
|
+
... ... ...
|
|
1632
|
+
1900-01-01 23:40:00 0.037800 1.5028
|
|
1633
|
+
1900-01-01 23:45:00 0.037800 1.5028
|
|
1634
|
+
1900-01-01 23:50:00 0.037800 1.5028
|
|
1635
|
+
1900-01-01 23:55:00 0.037800 1.5028
|
|
1636
|
+
1900-01-02 00:00:00 0.037800 1.5028
|
|
1637
|
+
[288 rows x 2 columns]
|
|
1638
|
+
|
|
1639
|
+
Pull a wide-form dataframe for all parameters for a link
|
|
1640
|
+
|
|
1641
|
+
>>> out.link_series('COND1', out.link_attributes)
|
|
1642
|
+
flow_rate flow_depth ... pol_rainfall sewage
|
|
1643
|
+
datetime ...
|
|
1644
|
+
1900-01-01 00:05:00 0.000031 0.053857 ... 0.000000 0.000000e+00
|
|
1645
|
+
1900-01-01 00:10:00 0.000280 0.134876 ... 0.000000 0.000000e+00
|
|
1646
|
+
1900-01-01 00:15:00 0.000820 0.165356 ... 0.000000 0.000000e+00
|
|
1647
|
+
1900-01-01 00:20:00 0.001660 0.188868 ... 0.000000 0.000000e+00
|
|
1648
|
+
1900-01-01 00:25:00 0.002694 0.206378 ... 0.000000 0.000000e+00
|
|
1649
|
+
... ... ... ... ... ...
|
|
1650
|
+
1900-01-01 23:40:00 0.037800 0.312581 ... 68.344780 6.173063e-08
|
|
1651
|
+
1900-01-01 23:45:00 0.037800 0.312581 ... 68.242958 5.872794e-08
|
|
1652
|
+
1900-01-01 23:50:00 0.037800 0.312581 ... 68.144737 5.583060e-08
|
|
1653
|
+
1900-01-01 23:55:00 0.037800 0.312581 ... 68.052620 5.311425e-08
|
|
1654
|
+
1900-01-02 00:00:00 0.037800 0.312581 ... 67.963829 5.049533e-08
|
|
1655
|
+
|
|
1656
|
+
[288 rows x 8 columns]
|
|
1657
|
+
|
|
1658
|
+
Pull a long-form dataframe of all links and attributes
|
|
1659
|
+
|
|
1660
|
+
>>> out.link_series(out.links, out.link_attributes, columns=None)
|
|
1661
|
+
result
|
|
1662
|
+
datetime element attribute
|
|
1663
|
+
1900-01-01 00:05:00 COND1 flow_rate 0.000031
|
|
1664
|
+
1900-01-01 00:10:00 COND1 flow_rate 0.000280
|
|
1665
|
+
1900-01-01 00:15:00 COND1 flow_rate 0.000820
|
|
1666
|
+
1900-01-01 00:20:00 COND1 flow_rate 0.001660
|
|
1667
|
+
1900-01-01 00:25:00 COND1 flow_rate 0.002694
|
|
1668
|
+
... ...
|
|
1669
|
+
1900-01-01 23:40:00 WR1 sewage 45.430920
|
|
1670
|
+
1900-01-01 23:45:00 WR1 sewage 45.430920
|
|
1671
|
+
1900-01-01 23:50:00 WR1 sewage 45.430920
|
|
1672
|
+
1900-01-01 23:55:00 WR1 sewage 45.430920
|
|
1673
|
+
1900-01-02 00:00:00 WR1 sewage 45.430920
|
|
1674
|
+
|
|
1675
|
+
[18432 rows x 1 columns]
|
|
1676
|
+
|
|
1677
|
+
Pull flow timeseries and pollutant tracer concentrations for a link and plot
|
|
1678
|
+
|
|
1679
|
+
.. plot::
|
|
1680
|
+
|
|
1681
|
+
import matplotlib.pyplot as plt
|
|
1682
|
+
import matplotlib.dates as mdates
|
|
1683
|
+
from swmm.pandas import Output,test_out_path
|
|
1684
|
+
|
|
1685
|
+
out = Output(test_out_path)
|
|
1686
|
+
df = out.link_series('COND6',['flow_rate','groundwater','pol_rainfall','sewage'])
|
|
1687
|
+
|
|
1688
|
+
# set up figure
|
|
1689
|
+
fig,ax = plt.subplots(figsize=(8,4))
|
|
1690
|
+
|
|
1691
|
+
# plot flow rate on primary yaxis
|
|
1692
|
+
ax.plot(df.flow_rate,label="flow rate")
|
|
1693
|
+
|
|
1694
|
+
# plot pollutant concentrations on secondary axis
|
|
1695
|
+
# rainfall, DWF, and groundwater were given 100 mg/L pollutant
|
|
1696
|
+
# concentrations to serve as tracers
|
|
1697
|
+
ax1 = ax.twinx()
|
|
1698
|
+
ax1.plot(df.groundwater,ls = '--',label="groundwater tracer")
|
|
1699
|
+
ax1.plot(df.pol_rainfall,ls = '--',label="rainfall tracer")
|
|
1700
|
+
ax1.plot(df.sewage,ls = '--',label="sewage tracer")
|
|
1701
|
+
|
|
1702
|
+
# style axes
|
|
1703
|
+
ax.set_ylabel("Flow Rate (cfs)")
|
|
1704
|
+
ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M"))
|
|
1705
|
+
ax1.set_ylabel("Percent")
|
|
1706
|
+
|
|
1707
|
+
# add legend and show figure
|
|
1708
|
+
fig.legend(bbox_to_anchor=(1,1),bbox_transform=ax.transAxes)
|
|
1709
|
+
fig.tight_layout()
|
|
1710
|
+
|
|
1711
|
+
fig.show()
|
|
1712
|
+
|
|
1713
|
+
"""
|
|
1714
|
+
linkArray, linkIndexArray = self._validateElement(link, self.links)
|
|
1715
|
+
|
|
1716
|
+
attributeArray, attributeIndexArray = self._validateAttribute(
|
|
1717
|
+
attribute, self.link_attributes
|
|
1718
|
+
)
|
|
1719
|
+
|
|
1720
|
+
startIndex = self._time2step(start, 0)[0]
|
|
1721
|
+
endIndex = self._time2step(end, self._period)[0]
|
|
1722
|
+
|
|
1723
|
+
getter = (
|
|
1724
|
+
self._memory_series_getter("link")
|
|
1725
|
+
if self._preload
|
|
1726
|
+
else output.get_link_series
|
|
1727
|
+
)
|
|
1728
|
+
|
|
1729
|
+
values = self._model_series(
|
|
1730
|
+
linkIndexArray,
|
|
1731
|
+
attributeIndexArray,
|
|
1732
|
+
startIndex,
|
|
1733
|
+
endIndex,
|
|
1734
|
+
columns,
|
|
1735
|
+
getter,
|
|
1736
|
+
)
|
|
1737
|
+
|
|
1738
|
+
if not asframe:
|
|
1739
|
+
return values
|
|
1740
|
+
|
|
1741
|
+
dfIndex, cols = self._model_series_index(
|
|
1742
|
+
linkArray, attributeArray, startIndex, endIndex, columns
|
|
1743
|
+
)
|
|
1744
|
+
|
|
1745
|
+
return DataFrame(values, index=dfIndex, columns=cols)
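# A minimal usage sketch (an illustration, not from the original source): peak
# flow rate per link, using columns='elem' so each link becomes a column.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    flows = out.link_series(out.links, "flow_rate", columns="elem")
    print(flows.max())  # one peak flow value per link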
|
|
1746
|
+
|
|
1747
|
+
@output_open_handler
|
|
1748
|
+
def system_series(
|
|
1749
|
+
self,
|
|
1750
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = None,
|
|
1751
|
+
start: str | int | datetime | None = None,
|
|
1752
|
+
end: str | int | datetime | None = None,
|
|
1753
|
+
asframe: bool = True,
|
|
1754
|
+
) -> DataFrame | ndarray:
|
|
1755
|
+
"""Get one or more a time series for one or more system attributes.
|
|
1756
|
+
Specify series start index and end index to get desired time range.
|
|
1757
|
+
|
|
1758
|
+
Parameters
|
|
1759
|
+
----------
|
|
1760
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None
|
|
1761
|
+
The attribute index or name.
|
|
1762
|
+
|
|
1763
|
+
One of:
|
|
1764
|
+
|
|
1765
|
+
**air_temp, rainfall, snow_depth, evap_infil_loss, runoff_flow,
|
|
1766
|
+
dry_weather_inflow, gw_inflow, rdii_inflow, direct_inflow, total_lateral_inflow,
|
|
1767
|
+
flood_losses, outfall_flows, volume_stored, evap_rate**.
|
|
1768
|
+
|
|
1769
|
+
defaults to `None`.
|
|
1770
|
+
|
|
1771
|
+
Can also input the integer index of the attribute you would like to
|
|
1772
|
+
pull or the actual enum from Output.system_attributes.
|
|
1773
|
+
|
|
1774
|
+
Setting to None indicates all attributes.
|
|
1775
|
+
|
|
1776
|
+
start: Union[str, int, datetime, None], optional
|
|
1777
|
+
The start datetime or index from which to return the series, defaults to None.
|
|
1778
|
+
|
|
1779
|
+
Setting to None indicates simulation start.
|
|
1780
|
+
|
|
1781
|
+
end: Union[str, int, datetime, None], optional
|
|
1782
|
+
The end datetime or index up to which to return the series, defaults to None.
|
|
1783
|
+
|
|
1784
|
+
Setting to None indicates simulation end.
|
|
1785
|
+
|
|
1786
|
+
asframe: bool
|
|
1787
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
1788
|
+
|
|
1789
|
+
Returns
|
|
1790
|
+
-------
|
|
1791
|
+
Union[pd.DataFrame,np.ndarray]
|
|
1792
|
+
A DataFrame or ndarray of attribute values in each column for requested date range.
|
|
1793
|
+
|
|
1794
|
+
Examples
|
|
1795
|
+
---------
|
|
1796
|
+
|
|
1797
|
+
Pull two system attribute time series
|
|
1798
|
+
|
|
1799
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
1800
|
+
>>> out = Output(test_out_path)
|
|
1801
|
+
>>> out.system_series(['total_lateral_inflow','rainfall'])
|
|
1802
|
+
total_lateral_inflow rainfall
|
|
1803
|
+
datetime
|
|
1804
|
+
1900-01-01 00:05:00 0.902807 0.03000
|
|
1805
|
+
1900-01-01 00:10:00 0.902800 0.03000
|
|
1806
|
+
1900-01-01 00:15:00 0.902793 0.03000
|
|
1807
|
+
1900-01-01 00:20:00 0.902786 0.03000
|
|
1808
|
+
1900-01-01 00:25:00 0.902779 0.03000
|
|
1809
|
+
... ... ...
|
|
1810
|
+
1900-01-01 23:40:00 1.431874 0.03224
|
|
1811
|
+
1900-01-01 23:45:00 1.431869 0.03224
|
|
1812
|
+
1900-01-01 23:50:00 1.431876 0.03224
|
|
1813
|
+
1900-01-01 23:55:00 1.431894 0.03224
|
|
1814
|
+
1900-01-02 00:00:00 1.431921 0.00000
|
|
1815
|
+
[288 rows x 2 columns]
|
|
1816
|
+
|
|
1817
|
+
"""
|
|
1818
|
+
|
|
1819
|
+
attributeArray, attributeIndexArray = self._validateAttribute(
|
|
1820
|
+
attribute, self.system_attributes
|
|
1821
|
+
)
|
|
1822
|
+
|
|
1823
|
+
startIndex = self._time2step(start, 0)[0]
|
|
1824
|
+
endIndex = self._time2step(end, self._period)[0]
|
|
1825
|
+
|
|
1826
|
+
getter = (
|
|
1827
|
+
self._memory_series_getter("sys")
|
|
1828
|
+
if self._preload
|
|
1829
|
+
else output.get_system_series
|
|
1830
|
+
)
|
|
1831
|
+
|
|
1832
|
+
values = stack(
|
|
1833
|
+
[
|
|
1834
|
+
getter(self._handle, sysAttr, startIndex, endIndex)
|
|
1835
|
+
for sysAttr in attributeIndexArray
|
|
1836
|
+
],
|
|
1837
|
+
axis=1,
|
|
1838
|
+
)
|
|
1839
|
+
|
|
1840
|
+
if not asframe:
|
|
1841
|
+
return values
|
|
1842
|
+
|
|
1843
|
+
dfIndex = Index(self.timeIndex[startIndex:endIndex], name="datetime")
|
|
1844
|
+
return DataFrame(values, index=dfIndex, columns=attributeArray)
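# A minimal usage sketch (assumption: `out.report` is the reporting time step
# in seconds, as used in the node_series docstring example above): convert the
# system flood and outfall flow series into approximate total volumes.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    sys_df = out.system_series(["flood_losses", "outfall_flows"])
    print(sys_df.sum() * out.report)  # flow summed per step * seconds per step ~ volume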
|
|
1845
|
+
|
|
1846
|
+
####### attribute getters #######
|
|
1847
|
+
|
|
1848
|
+
@output_open_handler
|
|
1849
|
+
def subcatch_attribute(
|
|
1850
|
+
self,
|
|
1851
|
+
time: str | int | datetime,
|
|
1852
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = (
|
|
1853
|
+
"rainfall",
|
|
1854
|
+
"runoff_rate",
|
|
1855
|
+
"gw_outflow_rate",
|
|
1856
|
+
),
|
|
1857
|
+
asframe: bool = True,
|
|
1858
|
+
) -> DataFrame | ndarray:
|
|
1859
|
+
"""For all subcatchments at a given time, get a one or more attributes.
|
|
1860
|
+
|
|
1861
|
+
Parameters
|
|
1862
|
+
----------
|
|
1863
|
+
time: Union[str, int, datetime]
|
|
1864
|
+
The datetime or simulation index for which to pull data.
|
|
1865
|
+
|
|
1866
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None,
|
|
1867
|
+
The attribute index or name.
|
|
1868
|
+
|
|
1869
|
+
One of:
|
|
1870
|
+
|
|
1871
|
+
**rainfall, snow_depth, evap_loss, infil_loss, runoff_rate, gw_outflow_rate,
|
|
1872
|
+
gw_table_elev, soil_moisture**.
|
|
1873
|
+
|
|
1874
|
+
Defaults to: `('rainfall','runoff_rate','gw_outflow_rate').`
|
|
1875
|
+
|
|
1876
|
+
You can also input the integer index of the attribute you would like to
|
|
1877
|
+
pull or the actual enum from Output.subcatch_attributes.
|
|
1878
|
+
|
|
1879
|
+
Setting to None indicates all attributes.
|
|
1880
|
+
|
|
1881
|
+
asframe: bool
|
|
1882
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
1883
|
+
|
|
1884
|
+
Returns
|
|
1885
|
+
-------
|
|
1886
|
+
Union[pd.DataFrame, np.ndarray]
|
|
1887
|
+
A DataFrame or ndarray of attribute values in each column for requested simulation time.
|
|
1888
|
+
|
|
1889
|
+
Examples
|
|
1890
|
+
---------
|
|
1891
|
+
Pull rainfall for all catchments at start of simulation
|
|
1892
|
+
|
|
1893
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
1894
|
+
>>> out = Output(test_out_path)
|
|
1895
|
+
>>> out.subcatch_attribute(0,'rainfall')
|
|
1896
|
+
rainfall
|
|
1897
|
+
subcatchment
|
|
1898
|
+
SUB1 0.03
|
|
1899
|
+
SUB2 0.03
|
|
1900
|
+
SUB3 0.03
|
|
1901
|
+
"""
|
|
1902
|
+
|
|
1903
|
+
attributeArray, attributeIndexArray = self._validateAttribute(
|
|
1904
|
+
attribute, self.subcatch_attributes
|
|
1905
|
+
)
|
|
1906
|
+
|
|
1907
|
+
timeIndex = self._time2step([time])[0]
|
|
1908
|
+
|
|
1909
|
+
values = stack(
|
|
1910
|
+
[
|
|
1911
|
+
output.get_subcatch_attribute(self._handle, timeIndex, scAttr)
|
|
1912
|
+
for scAttr in attributeIndexArray
|
|
1913
|
+
],
|
|
1914
|
+
axis=1,
|
|
1915
|
+
)
|
|
1916
|
+
|
|
1917
|
+
if not asframe:
|
|
1918
|
+
return values
|
|
1919
|
+
|
|
1920
|
+
dfIndex = Index(self.subcatchments, name="subcatchment")
|
|
1921
|
+
|
|
1922
|
+
return DataFrame(values, index=dfIndex, columns=attributeArray)
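# A minimal usage sketch (the timestamp is an assumption; times are matched to
# the nearest report step): rank subcatchments by runoff partway through the run.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    snap = out.subcatch_attribute("1900-01-01 12:00:00", ["rainfall", "runoff_rate"])
    print(snap.sort_values("runoff_rate", ascending=False))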
|
|
1923
|
+
|
|
1924
|
+
@output_open_handler
|
|
1925
|
+
def node_attribute(
|
|
1926
|
+
self,
|
|
1927
|
+
time: str | int | datetime,
|
|
1928
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = (
|
|
1929
|
+
"invert_depth",
|
|
1930
|
+
"flooding_losses",
|
|
1931
|
+
"total_inflow",
|
|
1932
|
+
),
|
|
1933
|
+
asframe: bool = True,
|
|
1934
|
+
) -> DataFrame | ndarray:
|
|
1935
|
+
"""For all nodes at a given time, get one or more attributes.
|
|
1936
|
+
|
|
1937
|
+
Parameters
|
|
1938
|
+
----------
|
|
1939
|
+
time: Union[str, int, datetime]
|
|
1940
|
+
The datetime or simulation index for which to pull data.
|
|
1941
|
+
|
|
1942
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None,
|
|
1943
|
+
The attribute index or name.
|
|
1944
|
+
|
|
1945
|
+
One of:
|
|
1946
|
+
|
|
1947
|
+
**invert_depth, hydraulic_head, ponded_volume, lateral_inflow,
|
|
1948
|
+
total_inflow, flooding_losses**.
|
|
1949
|
+
|
|
1950
|
+
defaults to: `('invert_depth','flooding_losses','total_inflow')`
|
|
1951
|
+
|
|
1952
|
+
Can also input the integer index of the attribute you would like to
|
|
1953
|
+
pull or the actual enum from Output.node_attributes.
|
|
1954
|
+
|
|
1955
|
+
Setting to None indicates all attributes.
|
|
1956
|
+
|
|
1957
|
+
asframe: bool
|
|
1958
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
1959
|
+
|
|
1960
|
+
Returns
|
|
1961
|
+
-------
|
|
1962
|
+
Union[pd.DataFrame, np.ndarray]
|
|
1963
|
+
A DataFrame or ndarray of attribute values in each column for requested simulation time.
|
|
1964
|
+
|
|
1965
|
+
Examples
|
|
1966
|
+
---------
|
|
1967
|
+
Pull all attributes from middle of simulation
|
|
1968
|
+
|
|
1969
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
1970
|
+
>>> out = Output(test_out_path)
|
|
1971
|
+
>>> out.node_attribute(out.period/2)
|
|
1972
|
+
invert_depth hydraulic_head ponded_volume ... groundwater pol_rainfall sewage
|
|
1973
|
+
node ...
|
|
1974
|
+
JUNC1 8.677408 10.177408 0.000000 ... 0.260937 99.739067 0.000000
|
|
1975
|
+
JUNC2 4.286304 3.246305 0.000000 ... 0.366218 96.767433 2.475719
|
|
1976
|
+
JUNC3 11.506939 8.036940 35.862713 ... 0.615687 94.522049 4.862284
|
|
1977
|
+
JUNC4 14.936149 9.686150 6107.279785 ... 0.381425 96.532028 3.086555
|
|
1978
|
+
JUNC5 11.190232 4.690233 0.000000 ... 0.443388 95.959351 3.597255
|
|
1979
|
+
JUNC6 1.650765 1.650765 0.000000 ... 0.963940 91.113075 7.922997
|
|
1980
|
+
OUT1 0.946313 1.046313 0.000000 ... 0.969624 91.060143 7.970241
|
|
1981
|
+
OUT2 0.000000 -1.040001 0.000000 ... 0.367271 96.756134 2.479369
|
|
1982
|
+
STOR1 18.282972 3.032968 7550.865723 ... 0.961457 91.136200 7.902364
|
|
1983
|
+
[9 rows x 9 columns]
|
|
1984
|
+
"""
|
|
1985
|
+
attributeArray, attributeIndexArray = self._validateAttribute(
|
|
1986
|
+
attribute, self.node_attributes
|
|
1987
|
+
)
|
|
1988
|
+
|
|
1989
|
+
timeIndex = self._time2step([time])[0]
|
|
1990
|
+
|
|
1991
|
+
values = stack(
|
|
1992
|
+
[
|
|
1993
|
+
output.get_node_attribute(self._handle, timeIndex, ndAttr)
|
|
1994
|
+
for ndAttr in attributeIndexArray
|
|
1995
|
+
],
|
|
1996
|
+
axis=1,
|
|
1997
|
+
)
|
|
1998
|
+
|
|
1999
|
+
if not asframe:
|
|
2000
|
+
return values
|
|
2001
|
+
|
|
2002
|
+
dfIndex = Index(self.nodes, name="node")
|
|
2003
|
+
|
|
2004
|
+
return DataFrame(values, index=dfIndex, columns=attributeArray)
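# A minimal usage sketch (not part of the original source): list the nodes that
# report flooding at the middle report step.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    snap = out.node_attribute(out.period // 2, "flooding_losses")
    print(snap[snap["flooding_losses"] > 0].index.tolist())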
|
|
2005
|
+
|
|
2006
|
+
@output_open_handler
|
|
2007
|
+
def link_attribute(
|
|
2008
|
+
self,
|
|
2009
|
+
time: str | int | datetime,
|
|
2010
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = (
|
|
2011
|
+
"flow_rate",
|
|
2012
|
+
"flow_velocity",
|
|
2013
|
+
"flow_depth",
|
|
2014
|
+
),
|
|
2015
|
+
asframe: bool = True,
|
|
2016
|
+
) -> DataFrame | ndarray:
|
|
2017
|
+
"""For all links at a given time, get one or more attributes.
|
|
2018
|
+
|
|
2019
|
+
Parameters
|
|
2020
|
+
----------
|
|
2021
|
+
time: Union[str, int, datetime]
|
|
2022
|
+
The datetime or simulation index for which to pull data.
|
|
2023
|
+
|
|
2024
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None
|
|
2025
|
+
The attribute index or name.
|
|
2026
|
+
|
|
2027
|
+
One of:
|
|
2028
|
+
|
|
2029
|
+
flow_rate, flow_depth, flow_velocity, flow_volume, capacity,
|
|
2030
|
+
|
|
2031
|
+
defaults to `('flow_rate','flow_velocity','flow_depth')`
|
|
2032
|
+
|
|
2033
|
+
Can also input the integer index of the attribute you would like to
|
|
2034
|
+
pull or the actual enum from Output.link_attributes.
|
|
2035
|
+
|
|
2036
|
+
Setting to None indicates all attributes.
|
|
2037
|
+
|
|
2038
|
+
asframe: bool
|
|
2039
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
2040
|
+
|
|
2041
|
+
Returns
|
|
2042
|
+
-------
|
|
2043
|
+
Union[pd.DataFrame, np.ndarray]
|
|
2044
|
+
A DataFrame or ndarray of attribute values in each column for requested simulation time.
|
|
2045
|
+
|
|
2046
|
+
Examples
|
|
2047
|
+
---------
|
|
2048
|
+
Pull depth, flooding, and total inflow attributes from the middle of the simulation
|
|
2049
|
+
|
|
2050
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2051
|
+
>>> out = Output(test_out_path)
|
|
2052
|
+
>>> out.link_attribute(out.period/2)
|
|
2053
|
+
invert_depth flooding_losses total_inflow
|
|
2054
|
+
node
|
|
2055
|
+
JUNC1 8.677408 0.000000 2.665294
|
|
2056
|
+
JUNC2 4.286304 0.000000 14.571551
|
|
2057
|
+
JUNC3 11.506939 0.341040 2.319820
|
|
2058
|
+
JUNC4 14.936149 16.137648 27.521870
|
|
2059
|
+
JUNC5 11.190232 0.000000 9.051201
|
|
2060
|
+
JUNC6 1.650765 0.000000 5.799996
|
|
2061
|
+
OUT1 0.946313 0.000000 5.799996
|
|
2062
|
+
OUT2 0.000000 0.000000 14.574173
|
|
2063
|
+
STOR1 18.282972 0.000000 9.048394
|
|
2064
|
+
"""
|
|
2065
|
+
attributeArray, attributeIndexArray = self._validateAttribute(
|
|
2066
|
+
attribute, self.link_attributes
|
|
2067
|
+
)
|
|
2068
|
+
|
|
2069
|
+
timeIndex = self._time2step([time])[0]
|
|
2070
|
+
|
|
2071
|
+
values = stack(
|
|
2072
|
+
[
|
|
2073
|
+
output.get_link_attribute(self._handle, timeIndex, lnkAttr)
|
|
2074
|
+
for lnkAttr in attributeIndexArray
|
|
2075
|
+
],
|
|
2076
|
+
axis=1,
|
|
2077
|
+
)
|
|
2078
|
+
|
|
2079
|
+
if not asframe:
|
|
2080
|
+
return values
|
|
2081
|
+
|
|
2082
|
+
dfIndex = Index(self.links, name="link")
|
|
2083
|
+
|
|
2084
|
+
return DataFrame(values, index=dfIndex, columns=attributeArray)
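# A minimal usage sketch (the 0.9 threshold is an arbitrary assumption): links
# running at or above 90% capacity at the middle report step.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    snap = out.link_attribute(out.period // 2, "capacity")
    print(snap[snap["capacity"] >= 0.9])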
|
|
2085
|
+
|
|
2086
|
+
@output_open_handler
|
|
2087
|
+
def system_attribute(
|
|
2088
|
+
self,
|
|
2089
|
+
time: str | int | datetime,
|
|
2090
|
+
attribute: int | str | EnumMeta | Sequence[int | str | EnumMeta] | None = None,
|
|
2091
|
+
asframe: bool = True,
|
|
2092
|
+
) -> DataFrame | ndarray:
|
|
2093
|
+
"""For all nodes at given time, get a one or more attributes.
|
|
2094
|
+
|
|
2095
|
+
Parameters
|
|
2096
|
+
----------
|
|
2097
|
+
time: Union[str, int, datetime]
|
|
2098
|
+
The datetime or simulation index for which to pull data.
|
|
2099
|
+
|
|
2100
|
+
attribute: Union[int, str, Sequence[Union[int, str]], None]
|
|
2101
|
+
The attribute index or name.
|
|
2102
|
+
|
|
2103
|
+
One of:
|
|
2104
|
+
|
|
2105
|
+
**air_temp, rainfall, snow_depth, evap_infil_loss, runoff_flow,
|
|
2106
|
+
dry_weather_inflow, gw_inflow, rdii_inflow, direct_inflow, total_lateral_inflow,
|
|
2107
|
+
flood_losses, outfall_flows, volume_stored, evap_rate**.
|
|
2108
|
+
|
|
2109
|
+
defaults to `None`.
|
|
2110
|
+
|
|
2111
|
+
Can also input the integer index of the attribute you would like to
|
|
2112
|
+
pull or the actual enum from Output.system_attributes.
|
|
2113
|
+
|
|
2114
|
+
Setting to None indicates all attributes.
|
|
2115
|
+
|
|
2116
|
+
asframe: bool
|
|
2117
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
2118
|
+
|
|
2119
|
+
Returns
|
|
2120
|
+
-------
|
|
2121
|
+
Union[pd.DataFrame,np.ndarray]
|
|
2122
|
+
A DataFrame or ndarray of attribute values in each column for requested simulation time.
|
|
2123
|
+
|
|
2124
|
+
Examples
|
|
2125
|
+
---------
|
|
2126
|
+
|
|
2127
|
+
Pull all system attributes for the 10th time step
|
|
2128
|
+
|
|
2129
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2130
|
+
>>> out = Output(test_out_path)
|
|
2131
|
+
>>> out.system_attribute(10)
|
|
2132
|
+
result
|
|
2133
|
+
attribute
|
|
2134
|
+
air_temp 70.000000
|
|
2135
|
+
rainfall 0.030000
|
|
2136
|
+
snow_depth 0.000000
|
|
2137
|
+
evap_infil_loss 0.015042
|
|
2138
|
+
runoff_flow 0.066304
|
|
2139
|
+
dry_weather_inflow 0.801000
|
|
2140
|
+
gw_inflow 0.101737
|
|
2141
|
+
rdii_inflow 0.000000
|
|
2142
|
+
direct_inflow 0.000000
|
|
2143
|
+
total_lateral_inflow 0.969041
|
|
2144
|
+
flood_losses 0.000000
|
|
2145
|
+
outfall_flows 0.944981
|
|
2146
|
+
volume_stored 1731.835938
|
|
2147
|
+
evap_rate 0.000000
|
|
2148
|
+
ptnl_evap_rate 0.000000
|
|
2149
|
+
"""
|
|
2150
|
+
|
|
2151
|
+
attributeArray, attributeIndexArray = self._validateAttribute(
|
|
2152
|
+
attribute, self.system_attributes
|
|
2153
|
+
)
|
|
2154
|
+
|
|
2155
|
+
timeIndex = self._time2step([time])[0]
|
|
2156
|
+
|
|
2157
|
+
values = asarray(
|
|
2158
|
+
[
|
|
2159
|
+
output.get_system_attribute(self._handle, timeIndex, sysAttr)
|
|
2160
|
+
for sysAttr in attributeIndexArray
|
|
2161
|
+
]
|
|
2162
|
+
)
|
|
2163
|
+
|
|
2164
|
+
if not asframe:
|
|
2165
|
+
return values
|
|
2166
|
+
|
|
2167
|
+
dfIndex = Index(attributeArray, name="attribute")
|
|
2168
|
+
|
|
2169
|
+
return DataFrame(values, index=dfIndex, columns=["result"])
|
|
2170
|
+
|
|
2171
|
+
####### result getters #######
|
|
2172
|
+
|
|
2173
|
+
@output_open_handler
|
|
2174
|
+
def subcatch_result(
|
|
2175
|
+
self,
|
|
2176
|
+
subcatchment: int | str | Sequence[int | str] | None,
|
|
2177
|
+
time: int | str | Sequence[int | str] | None,
|
|
2178
|
+
asframe: bool = True,
|
|
2179
|
+
) -> DataFrame | ndarray:
|
|
2180
|
+
"""For a subcatchment at one or more given times, get all attributes.
|
|
2181
|
+
|
|
2182
|
+
Only one of `subcatchment` or `time` can be multiple (e.g. a list), not both.
|
|
2183
|
+
|
|
2184
|
+
Parameters
|
|
2185
|
+
----------
|
|
2186
|
+
subcatchment: Union[int, str, Sequence[Union[int, str]], None],
|
|
2187
|
+
The subcatchment(s) name(s) or index(s).
|
|
2188
|
+
|
|
2189
|
+
time: Union[int, str, Sequence[Union[int, str]], None],
|
|
2190
|
+
The datetime(s) or simulation index(s).
|
|
2191
|
+
|
|
2192
|
+
asframe: bool
|
|
2193
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
2194
|
+
|
|
2195
|
+
Returns
|
|
2196
|
+
-------
|
|
2197
|
+
Union[pd.DataFrame,np.ndarray]
|
|
2198
|
+
A DataFrame or ndarray of all attribute values for the given subcatchment(s) at the given time(s).
|
|
2199
|
+
|
|
2200
|
+
Examples
|
|
2201
|
+
---------
|
|
2202
|
+
|
|
2203
|
+
Pull all attributes at start, middle, and end time steps for a single catchment
|
|
2204
|
+
|
|
2205
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2206
|
+
>>> out = Output(test_out_path)
|
|
2207
|
+
>>> out.subcatch_result("SUB1",[0,out.period/2,out.period-1])
|
|
2208
|
+
rainfall snow_depth evap_loss infil_loss ... soil_moisture groundwater pol_rainfall sewage
|
|
2209
|
+
datetime ...
|
|
2210
|
+
1900-01-01 00:05:00 0.030 0.0 0.0 0.020820 ... 0.276035 0.0 0.0 0.0
|
|
2211
|
+
1900-01-01 12:05:00 1.212 0.0 0.0 0.594862 ... 0.281631 0.0 100.0 0.0
|
|
2212
|
+
1900-01-02 00:00:00 0.000 0.0 0.0 0.027270 ... 0.280026 0.0 100.0 0.0
|
|
2213
|
+
[3 rows x 11 columns]
|
|
2214
|
+
|
|
2215
|
+
Pull all attributes for all catchments at the start of the simulation
|
|
2216
|
+
|
|
2217
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2218
|
+
>>> out = Output(test_out_path)
|
|
2219
|
+
>>> out.subcatch_result(out.subcatchments,'1900-01-01')
|
|
2220
|
+
rainfall snow_depth evap_loss infil_loss ... soil_moisture groundwater pol_rainfall sewage
|
|
2221
|
+
subcatchment ...
|
|
2222
|
+
SUB1 0.03 0.0 0.0 0.020820 ... 0.276035 0.0 0.0 0.0
|
|
2223
|
+
SUB2 0.03 0.0 0.0 0.017824 ... 0.275048 0.0 0.0 0.0
|
|
2224
|
+
SUB3 0.03 0.0 0.0 0.011365 ... 0.279013 0.0 0.0 0.0
|
|
2225
|
+
[3 rows x 11 columns]
|
|
2226
|
+
"""
|
|
2227
|
+
|
|
2228
|
+
if isinstance(subcatchment, arrayish) and isinstance(time, arrayish):
|
|
2229
|
+
raise Exception("Can only have multiple of one of subcatchment and time")
|
|
2230
|
+
|
|
2231
|
+
elif isinstance(subcatchment, arrayish):
|
|
2232
|
+
label = "subcatchment"
|
|
2233
|
+
labels, indices = self._validateElement(subcatchment, self.subcatchments)
|
|
2234
|
+
timeIndex = self._time2step([time])[0] # type: ignore
|
|
2235
|
+
|
|
2236
|
+
values = vstack(
|
|
2237
|
+
[
|
|
2238
|
+
output.get_subcatch_result(self._handle, timeIndex, idx)
|
|
2239
|
+
for idx in indices
|
|
2240
|
+
]
|
|
2241
|
+
)
|
|
2242
|
+
|
|
2243
|
+
else:
|
|
2244
|
+
label = "datetime"
|
|
2245
|
+
times = self.timeIndex if time is None else atleast_1d(time)
|
|
2246
|
+
indices = self._time2step(times)
|
|
2247
|
+
|
|
2248
|
+
# since the timeIndex matches on nearest, we rebuild
|
|
2249
|
+
# the label in case it wasn't exact
|
|
2250
|
+
labels = self.timeIndex[indices]
|
|
2251
|
+
subcatchmentIndex = self._subcatchmentIndex(subcatchment)
|
|
2252
|
+
|
|
2253
|
+
values = atleast_2d(
|
|
2254
|
+
vstack(
|
|
2255
|
+
[
|
|
2256
|
+
output.get_subcatch_result(self._handle, idx, subcatchmentIndex)
|
|
2257
|
+
for idx in indices
|
|
2258
|
+
]
|
|
2259
|
+
)
|
|
2260
|
+
)
|
|
2261
|
+
|
|
2262
|
+
if not asframe:
|
|
2263
|
+
return values
|
|
2264
|
+
|
|
2265
|
+
dfIndex = Index(labels, name=label)
|
|
2266
|
+
|
|
2267
|
+
return DataFrame(
|
|
2268
|
+
values, index=dfIndex, columns=_enum_keys(self.subcatch_attributes)
|
|
2269
|
+
)
|
|
2270
|
+
|
|
2271
|
+
@output_open_handler
|
|
2272
|
+
def node_result(
|
|
2273
|
+
self,
|
|
2274
|
+
node: int | str | Sequence[int | str] | None,
|
|
2275
|
+
time: int | str | Sequence[int | str] | None,
|
|
2276
|
+
asframe: bool = True,
|
|
2277
|
+
) -> DataFrame | ndarray:
|
|
2278
|
+
"""For one or more nodes at one or more given times, get all attributes.
|
|
2279
|
+
|
|
2280
|
+
Only one of `node` or `time` can be multiple (e.g. a list), not both.
|
|
2281
|
+
|
|
2282
|
+
Parameters
|
|
2283
|
+
----------
|
|
2284
|
+
node: Union[int, str, Sequence[Union[int, str]], None],
|
|
2285
|
+
The node(s) name(s) or index(s).
|
|
2286
|
+
|
|
2287
|
+
time: Union[int, str, Sequence[Union[int, str]], None],
|
|
2288
|
+
The datetime(s) or simulation index(s).
|
|
2289
|
+
|
|
2290
|
+
asframe: bool
|
|
2291
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
2292
|
+
|
|
2293
|
+
Returns
|
|
2294
|
+
-------
|
|
2295
|
+
Union[pd.DataFrame,np.ndarray]
|
|
2296
|
+
A DataFrame or ndarray of all attribute values for the given node(s) at the given time(s).
|
|
2297
|
+
|
|
2298
|
+
Examples
|
|
2299
|
+
---------
|
|
2300
|
+
|
|
2301
|
+
Pull all attributes at start, middle, and end time steps for a single node
|
|
2302
|
+
|
|
2303
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2304
|
+
>>> out = Output(test_out_path)
|
|
2305
|
+
>>> out.node_result("JUNC1",[0,out.period/2,out.period-1])
|
|
2306
|
+
invert_depth hydraulic_head ponded_volume lateral_inflow ... flooding_losses groundwater pol_rainfall sewage
|
|
2307
|
+
datetime ...
|
|
2308
|
+
1900-01-01 00:05:00 0.002143 1.502143 0.0 0.002362 ... 0.0 84.334671 0.000000 0.0
|
|
2309
|
+
1900-01-01 12:05:00 8.677408 10.177408 0.0 2.665294 ... 0.0 0.260937 99.739067 0.0
|
|
2310
|
+
1900-01-02 00:00:00 0.108214 1.608214 0.0 0.037889 ... 0.0 33.929119 66.251686 0.0
|
|
2311
|
+
[3 rows x 9 columns]
|
|
2312
|
+
|
|
2313
|
+
Pull all attributes for all nodes at the start of the simulation
|
|
2314
|
+
|
|
2315
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2316
|
+
>>> out = Output(test_out_path)
|
|
2317
|
+
>>> out.node_result(out.nodes,'1900-01-01')
|
|
2318
|
+
invert_depth hydraulic_head ponded_volume lateral_inflow total_inflow flooding_losses groundwater pol_rainfall sewage
|
|
2319
|
+
node
|
|
2320
|
+
JUNC1 0.002143 1.502143 0.0 0.002362 0.002362 0.0 84.334671 0.0 0.000000
|
|
2321
|
+
JUNC2 0.334742 -0.705258 0.0 0.185754 0.185785 0.0 3.935642 0.0 95.884094
|
|
2322
|
+
JUNC3 0.000000 -3.470001 0.0 0.000000 0.000000 0.0 0.000000 0.0 0.000000
|
|
2323
|
+
JUNC4 0.530241 -4.719759 0.0 0.657521 0.657521 0.0 5.066027 0.0 94.864769
|
|
2324
|
+
JUNC5 0.090128 -6.409873 0.0 0.000000 0.027627 0.0 2.723724 0.0 82.198524
|
|
2325
|
+
JUNC6 0.000000 0.000000 0.0 0.000000 0.000000 0.0 0.000000 0.0 0.000000
|
|
2326
|
+
OUT1 0.000000 0.100000 0.0 0.000000 0.000000 0.0 0.000000 0.0 0.000000
|
|
2327
|
+
OUT2 0.000000 -1.040000 0.0 0.000000 0.000000 0.0 0.000000 0.0 0.000000
|
|
2328
|
+
STOR1 0.000000 -15.250000 0.0 0.000000 0.000000 0.0 0.000000 0.0 0.000000
|
|
2329
|
+
"""
|
|
2330
|
+
if isinstance(node, arrayish) and isinstance(time, arrayish):
|
|
2331
|
+
raise Exception("Can only have multiple of one of node and time")
|
|
2332
|
+
|
|
2333
|
+
elif isinstance(node, arrayish):
|
|
2334
|
+
label = "node"
|
|
2335
|
+
labels, indices = self._validateElement(node, self.nodes)
|
|
2336
|
+
timeIndex = self._time2step([time])[0]
|
|
2337
|
+
values = vstack(
|
|
2338
|
+
[
|
|
2339
|
+
output.get_node_result(self._handle, timeIndex, idx)
|
|
2340
|
+
for idx in indices
|
|
2341
|
+
]
|
|
2342
|
+
)
|
|
2343
|
+
|
|
2344
|
+
else:
|
|
2345
|
+
label = "datetime"
|
|
2346
|
+
times = self.timeIndex if time is None else atleast_1d(time)
|
|
2347
|
+
indices = self._time2step(times)
|
|
2348
|
+
|
|
2349
|
+
# since the timeIndex matches on nearest, we rebuild
|
|
2350
|
+
# the label in case it wasn't exact
|
|
2351
|
+
labels = self.timeIndex[indices]
|
|
2352
|
+
nodeIndex = self._nodeIndex(node)
|
|
2353
|
+
|
|
2354
|
+
values = atleast_2d(
|
|
2355
|
+
vstack(
|
|
2356
|
+
[
|
|
2357
|
+
output.get_node_result(self._handle, idx, nodeIndex)
|
|
2358
|
+
for idx in indices
|
|
2359
|
+
]
|
|
2360
|
+
)
|
|
2361
|
+
)
|
|
2362
|
+
|
|
2363
|
+
if not asframe:
|
|
2364
|
+
return values
|
|
2365
|
+
|
|
2366
|
+
dfIndex = Index(labels, name=label)
|
|
2367
|
+
|
|
2368
|
+
return DataFrame(
|
|
2369
|
+
values, index=dfIndex, columns=_enum_keys(self.node_attributes)
|
|
2370
|
+
)
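# A minimal sketch of how node_result relates to node_series (an illustration,
# assuming report step 0 is the first row of both): a single node at a single
# step can be read either way.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    via_result = out.node_result("JUNC1", 0)                               # all attributes, one step
    via_series = out.node_series("JUNC1", "invert_depth", start=0, end=1)  # one attribute, one step
    print(via_result["invert_depth"].iloc[0], via_series.iloc[0, 0])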
|
|
2371
|
+
|
|
2372
|
+
@output_open_handler
|
|
2373
|
+
def link_result(
|
|
2374
|
+
self,
|
|
2375
|
+
link: int | str | Sequence[int | str] | None,
|
|
2376
|
+
time: int | str | Sequence[int | str] | None,
|
|
2377
|
+
asframe: bool = True,
|
|
2378
|
+
) -> DataFrame | ndarray:
|
|
2379
|
+
"""For a link at one or more given times, get all attributes.
|
|
2380
|
+
|
|
2381
|
+
Only one of `link` or `time` can be multiple (e.g. a list), not both.
|
|
2382
|
+
|
|
2383
|
+
Parameters
|
|
2384
|
+
----------
|
|
2385
|
+
link: Union[int, str, Sequence[Union[int, str]], None],
|
|
2386
|
+
The link(s) name(s) or index(s).
|
|
2387
|
+
|
|
2388
|
+
time: Union[int, str, Sequence[Union[int, str]], None],
|
|
2389
|
+
The datetime(s) or simulation index(s).
|
|
2390
|
+
|
|
2391
|
+
asframe: bool
|
|
2392
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
2393
|
+
|
|
2394
|
+
Returns
|
|
2395
|
+
-------
|
|
2396
|
+
Union[pd.DataFrame,np.ndarray]
|
|
2397
|
+
A DataFrame or ndarray of all attribute values for the given link(s) at the given time(s).
|
|
2398
|
+
|
|
2399
|
+
Examples
|
|
2400
|
+
---------
|
|
2401
|
+
|
|
2402
|
+
Pull all attributes at start, middle, and end time steps for a single link
|
|
2403
|
+
|
|
2404
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2405
|
+
>>> out = Output(test_out_path)
|
|
2406
|
+
>>> out.link_result("COND1",[0,out.period/2,out.period-1])
|
|
2407
|
+
flow_rate flow_depth flow_velocity flow_volume capacity groundwater pol_rainfall sewage
|
|
2408
|
+
datetime
|
|
2409
|
+
1900-01-01 00:05:00 0.000031 0.053857 0.001116 23.910770 0.024351 79.488449 0.000000 0.000000e+00
|
|
2410
|
+
1900-01-01 12:05:00 2.665548 1.000000 3.393882 732.276428 1.000000 0.491514 99.142815 2.742904e-01
|
|
2411
|
+
1900-01-02 00:00:00 0.037800 0.312581 0.180144 212.443344 0.267168 32.083355 67.963829 5.049533e-08
|
|
2412
|
+
|
|
2413
|
+
Pull all attributes for all links at the start of the simulation
|
|
2414
|
+
|
|
2415
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2416
|
+
>>> out = Output(test_out_path)
|
|
2417
|
+
>>> out.link_result(out.links,'1900-01-01')
|
|
2418
|
+
flow_rate flow_depth flow_velocity flow_volume capacity groundwater pol_rainfall sewage
|
|
2419
|
+
link
|
|
2420
|
+
COND1 0.000031 0.053857 0.001116 23.910770 0.024351 79.488449 0.0 0.000000
|
|
2421
|
+
COND2 0.000000 0.000100 0.000000 0.074102 0.000161 0.000000 0.0 0.000000
|
|
2422
|
+
COND3 0.000000 0.000100 0.000000 0.076337 0.000113 0.000000 0.0 0.000000
|
|
2423
|
+
COND4 0.027627 0.038128 0.304938 49.596237 0.026561 3.034879 0.0 86.882553
|
|
2424
|
+
COND5 0.000000 0.000100 0.000000 0.012962 0.000127 0.000000 0.0 0.000000
|
|
2425
|
+
COND6 0.000000 0.000100 0.000000 0.000404 0.000014 0.000000 0.0 0.000000
|
|
2426
|
+
PUMP1 0.000000 0.000000 0.000000 0.000000 0.000000 0.000000 0.0 0.000000
|
|
2427
|
+
WR1 0.000000 0.000000 0.000000 0.000000 1.000000 3.935642 0.0 95.884094
|
|
2428
|
+
"""
|
|
2429
|
+
if isinstance(link, arrayish) and isinstance(time, arrayish):
|
|
2430
|
+
raise Exception("Can only have multiple of one of link and time")
|
|
2431
|
+
|
|
2432
|
+
elif isinstance(link, arrayish):
|
|
2433
|
+
label = "link"
|
|
2434
|
+
labels, indices = self._validateElement(link, self.links)
|
|
2435
|
+
timeIndex = self._time2step([time])[0]
|
|
2436
|
+
|
|
2437
|
+
values = vstack(
|
|
2438
|
+
[
|
|
2439
|
+
output.get_link_result(self._handle, timeIndex, idx)
|
|
2440
|
+
for idx in indices
|
|
2441
|
+
]
|
|
2442
|
+
)
|
|
2443
|
+
|
|
2444
|
+
else:
|
|
2445
|
+
label = "datetime"
|
|
2446
|
+
times = self.timeIndex if time is None else atleast_1d(time)
|
|
2447
|
+
indices = self._time2step(times)
|
|
2448
|
+
|
|
2449
|
+
# since the timeIndex matches on nearest, we rebuild
|
|
2450
|
+
# the label in case it wasn't exact
|
|
2451
|
+
labels = self.timeIndex[indices]
|
|
2452
|
+
|
|
2453
|
+
linkIndex = self._linkIndex(link)
|
|
2454
|
+
values = atleast_2d(
|
|
2455
|
+
vstack(
|
|
2456
|
+
[
|
|
2457
|
+
output.get_link_result(self._handle, idx, linkIndex)
|
|
2458
|
+
for idx in indices
|
|
2459
|
+
]
|
|
2460
|
+
)
|
|
2461
|
+
)
|
|
2462
|
+
|
|
2463
|
+
if not asframe:
|
|
2464
|
+
return values
|
|
2465
|
+
|
|
2466
|
+
dfIndex = Index(labels, name=label)
|
|
2467
|
+
|
|
2468
|
+
return DataFrame(
|
|
2469
|
+
values, index=dfIndex, columns=_enum_keys(self.link_attributes)
|
|
2470
|
+
)
|
|
2471
|
+
|
|
2472
|
+
@output_open_handler
|
|
2473
|
+
def system_result(
|
|
2474
|
+
self,
|
|
2475
|
+
time: str | int | datetime,
|
|
2476
|
+
asframe: bool = True,
|
|
2477
|
+
) -> DataFrame | ndarray:
|
|
2478
|
+
"""For a given time, get all system attributes.
|
|
2479
|
+
|
|
2480
|
+
Parameters
|
|
2481
|
+
----------
|
|
2482
|
+
time: Union[str, int, datetime]
|
|
2483
|
+
The datetime or simulation index.
|
|
2484
|
+
|
|
2485
|
+
asframe: bool
|
|
2486
|
+
A switch to return an indexed DataFrame. Set to False to get an array of values only, defaults to True.
|
|
2487
|
+
|
|
2488
|
+
Returns
|
|
2489
|
+
-------
|
|
2490
|
+
Union[pd.DataFrame,np.ndarray]
|
|
2491
|
+
A DataFrame or ndarray of attribute values in each row for requested simulation time.
|
|
2492
|
+
|
|
2493
|
+
Examples
|
|
2494
|
+
---------
|
|
2495
|
+
|
|
2496
|
+
Pull all attributes at start of simulation
|
|
2497
|
+
|
|
2498
|
+
>>> from swmm.pandas import Output,test_out_path
|
|
2499
|
+
>>> out = Output(test_out_path)
|
|
2500
|
+
>>> out.system_result('1900-01-01')
|
|
2501
|
+
result
|
|
2502
|
+
attribute
|
|
2503
|
+
air_temp 70.000000
|
|
2504
|
+
rainfall 0.030000
|
|
2505
|
+
snow_depth 0.000000
|
|
2506
|
+
evap_infil_loss 0.013983
|
|
2507
|
+
runoff_flow 0.000000
|
|
2508
|
+
dry_weather_inflow 0.801000
|
|
2509
|
+
gw_inflow 0.101807
|
|
2510
|
+
rdii_inflow 0.000000
|
|
2511
|
+
direct_inflow 0.000000
|
|
2512
|
+
total_lateral_inflow 0.902807
|
|
2513
|
+
flood_losses 0.000000
|
|
2514
|
+
outfall_flows 0.000000
|
|
2515
|
+
volume_stored 168.436996
|
|
2516
|
+
evap_rate 0.000000
|
|
2517
|
+
ptnl_evap_rate 0.000000
|
|
2518
|
+
"""
|
|
2519
|
+
|
|
2520
|
+
timeIndex = self._time2step([time])[0]
|
|
2521
|
+
|
|
2522
|
+
values = asarray(output.get_system_result(self._handle, timeIndex, 0))
|
|
2523
|
+
|
|
2524
|
+
if not asframe:
|
|
2525
|
+
return values
|
|
2526
|
+
|
|
2527
|
+
dfIndex = Index(_enum_keys(self.system_attributes), name="attribute")
|
|
2528
|
+
|
|
2529
|
+
return DataFrame(values, index=dfIndex, columns=["result"])
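# A minimal usage sketch (index arithmetic assumes `period` counts report
# steps, as in the docstring examples above): snapshot of selected system
# totals at the final report step.
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    final = out.system_result(out.period - 1)
    print(final.loc[["total_lateral_inflow", "outfall_flows", "flood_losses"]])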
|
|
2530
|
+
|
|
2531
|
+
def getStructure(self, link, node):
|
|
2532
|
+
"""
|
|
2533
|
+
Return a structure object for a given list of links and nodes.
|
|
2534
|
+
|
|
2535
|
+
Parameters
|
|
2536
|
+
----------
|
|
2537
|
+
link: Union[str, Sequence[str]]
|
|
2538
|
+
The list of links that belong to the structure.
|
|
2539
|
+
node: Union[str, Sequence[str]]
|
|
2540
|
+
The list of nodes that belong to the structure.
|
|
2541
|
+
|
|
2542
|
+
Returns
|
|
2543
|
+
-------
|
|
2544
|
+
Structure
|
|
2545
|
+
Structure comprised of the given links and nodes.
|
|
2546
|
+
"""
|
|
2547
|
+
return Structure(self, link, node)
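# A minimal construction sketch (element names are taken from the examples
# above; whether they form a meaningful structure is an assumption, and the
# Structure API itself lives in swmm.pandas.output.structure, not in this file).
from swmm.pandas import Output, test_out_path

with Output(test_out_path) as out:
    storage = out.getStructure(["COND5", "WR1"], ["STOR1"])
    print(type(storage))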
|
|
2548
|
+
|
|
2549
|
+
# close outfile when object deleted
|
|
2550
|
+
# this doesn't always get called on sys.exit()
|
|
2551
|
+
# better to use output object with context
|
|
2552
|
+
# manager to ensure _open() and _close() are always called
|
|
2553
|
+
# in some cases, you can get a memory leak message from swig:
|
|
2554
|
+
# >>> exit()
|
|
2555
|
+
# swig/python detected a memory leak of type 'struct Handle *', no destructor found.
|
|
2556
|
+
def __del__(self) -> None:
|
|
2557
|
+
"""
|
|
2558
|
+
Destructor for outfile handle
|
|
2559
|
+
|
|
2560
|
+
:return: Nothing
|
|
2561
|
+
:rtype: None
|
|
2562
|
+
"""
|
|
2563
|
+
self._close()
|
|
2564
|
+
|
|
2565
|
+
# method used for context manager with statement
|
|
2566
|
+
def __enter__(self):
|
|
2567
|
+
self._open()
|
|
2568
|
+
return self
|
|
2569
|
+
|
|
2570
|
+
# method used for context manager with statement
|
|
2571
|
+
def __exit__(self, *arg) -> None:
|
|
2572
|
+
self._close()
|
|
2573
|
+
|
|
2574
|
+
def open(self):
|
|
2575
|
+
"open the output file"
|
|
2576
|
+
self._open()
|
|
2577
|
+
|
|
2578
|
+
def close(self):
|
|
2579
|
+
"close the output file"
|
|
2580
|
+
self._close()
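# A minimal usage sketch of the lifecycle helpers defined above: either manage
# the handle explicitly with open()/close(), or let the context manager do it.
from swmm.pandas import Output, test_out_path

out = Output(test_out_path)
out.open()
print(len(out.nodes))
out.close()

with Output(test_out_path) as out:   # __enter__/__exit__ handle open and close
    print(len(out.nodes))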
|