swmm-pandas 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,773 @@
+ from __future__ import annotations
+ import re
+ from io import StringIO
+
+ from pandas.core.api import DataFrame, Timestamp, to_datetime, to_timedelta, Series
+ from pandas.io.parsers import read_csv, read_fwf
+
+
+ class Report:
+     _rptfile: str
+     """path to swmm rpt file"""
+
+     _rpt_text: str
+     """text string of rpt file contents"""
+
+     _sections: dict[str, str]
+     """dictionary of SWMM report sections as {section name: section text}"""
+
+     def __init__(self, rptfile: str):
+         """Base class for a SWMM simulation report file.
+
+         The report object provides an API for the tables in the SWMM
+         simulation report file. Tables are accessed as properties of the
+         object and returned as pandas DataFrames.
+
+         Parameters
+         ----------
+         rptfile: str
+             model report file path
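+
+         Examples
+         --------
+         Minimal usage sketch; ``model.rpt`` is a hypothetical report path
+         and the tables shown are illustrative:
+
+         >>> rpt = Report("model.rpt")  # doctest: +SKIP
+         >>> rpt.analysis_options.head()  # doctest: +SKIP
+         >>> rpt.node_flooding_summary.head()  # doctest: +SKIP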
+         """
+
+         self._rptfile = rptfile
+
+         with open(rptfile) as file:
+             self._rpt_text = file.read()
+
+         self._sections = {
+             self._find_title(section): section
+             for section in self._find_sections(self._rpt_text)
+         }
+
+     @staticmethod
+     def _find_sections(rpt_text: str) -> list[str]:
+         r"""
+         Function to split the report file text into separate sections using a
+         regex pattern match:
+
+         "^\s+$\s+(?=\*|A)": pattern matches a blank line followed by at least
+         one whitespace character, followed by a lookahead for an asterisk
+         (which demarcates section headers) or the letter A (the word Analysis
+         at the end of the report file)
+
+         Parameters
+         ----------
+         rpt_text: str
+             Text content of the report file
+
+         Returns
+         -------
+         List[str]
+             A list of section texts
+         """
+         # pattern to match blank lines preceding a line of asterisks
+         section_pattern = R"^\s+$\s+(?=\*|A)"
+         section_comp = re.compile(section_pattern, re.MULTILINE)
+         return list(
+             map(lambda x: x.replace("\n ", "\n"), section_comp.split(rpt_text)[2:-1])
+         )
+
+     @staticmethod
+     def _find_title(section: str) -> str:
+         r"""
+         Function to extract the title of a section produced by _find_sections,
+         using regex to match the lines between two lines of asterisks.
+
+         "^\*+[\s\S]*?\n([\s\S]*?)\s*\*+": Pattern matches any whitespace or
+         non-whitespace characters that are between:
+         1. A line starting with a string of asterisks followed by any
+            whitespace or non-whitespace characters and ending with a newline
+         2. A line starting with a string of asterisks
+
+         Parameters
+         ----------
+         section: str
+             The section text produced by _find_sections
+
+         Returns
+         -------
+         str
+             Title of section
+
+         Raises
+         ------
+         Exception
+             If regex could not find a match
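+
+         Examples
+         --------
+         A minimal synthetic section (illustrative):
+
+         >>> Report._find_title("*****\nNode Depth Summary\n*****\n  data")
+         'Node Depth Summary'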
+         """
+         # pattern to match line between two lines of asterisks
+         title_pattern = R"^\*+[\s\S]*?\n([\s\S]*?)\s*\*+"
+         title_comp = re.compile(title_pattern, re.MULTILINE)
+         s = title_comp.match(section)
+         if s:
+             # if a match is found, split the line on two or more consecutive
+             # spaces and pull the first token
+             return s.group(1).split("  ")[0]
+         else:
+             raise Exception(f"Error finding title for section\n{section}")
+
+     @staticmethod
+     def _split_section(section: str) -> tuple[str, str]:
+         r"""
+         Function to split a report section into header and data elements.
+         Relies on regex matching of the lines of consecutive dashes that
+         underline the header lines.
+
+         Parameters
+         ----------
+         section: str
+             The section text produced by _find_sections
+
+         Returns
+         -------
+         Tuple[str, str]
+             header text and data text
+
+         Raises
+         ------
+         Exception
+             If the section does not split into a recognized number of
+             subsections
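+
+         Examples
+         --------
+         A minimal synthetic section (illustrative):
+
+         >>> Report._split_section("****\nTitle\n****\n  Col1  Col2\n  ------\n  A  1\n")
+         ('****\nTitle\n****\n  Col1  Col2', '  A  1\n')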
+         """
+         title = Report._find_title(section)
+         subsections = re.split(R"\s*-+\n", section)
+         num_subsections = len(subsections)
+
+         if num_subsections == 1:
+             header = "Result"
+             # split section on line of asterisks
+             data = re.split(R"\*+", section)[-1]
+
+         elif num_subsections == 2:
+             header, data = subsections
+
+         elif num_subsections == 3:
+             notes, header, data = subsections
+
+         elif num_subsections == 4:
+             notes, header, data, system = subsections
+
+         else:
+             raise Exception(f"Error parsing table {title}")
+
+         return header, data
+
+     @staticmethod
+     def _parse_header(header: str) -> list[str]:
+         r"""
+         Parse a header string produced by _split_section into a list of column
+         headers. Uses pandas read_fwf to automatically parse the multi-line
+         headers present in the report file.
+
+         Parameters
+         ----------
+         header: str
+             Header text string produced from _split_section
+
+         Returns
+         -------
+         List[str]
+             List of column headers
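+
+         Examples
+         --------
+         A minimal two-line fixed-width header (illustrative):
+
+         >>> Report._parse_header("Average  Maximum\nDepth    Depth")
+         ['Average_Depth', 'Maximum_Depth']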
+         """
+
+         # substitute single spaces between words with underscores
+         # replace asterisks or dashes with spaces
+         header = [
+             re.sub(R"(?<=\w)[^\S\r\n](?=\w)", "_", field[1].dropna().str.cat(sep="_"))
+             for field in read_fwf(
+                 StringIO(re.sub(R"\*|-", " ", header)), header=None
+             ).items()
+         ]
+
+         # split day and time into separate fields to be recombined into a
+         # datetime object when parsing the table
+         if "Time_of_Max_Occurrence_days_hr:min" in header:
+             max_idx = header.index("Time_of_Max_Occurrence_days_hr:min")
+             header[max_idx] = "days"
+             header.insert(max_idx + 1, "Time_of_Max")
+
+         return header
+
+     @staticmethod
+     def _parse_table(
+         header: list[str], data: str, sep: str = R"\s{2,}|\s:\s", index_col: int = 0
+     ) -> DataFrame:
+         r"""
+         Function to parse a data string produced by _split_section into a
+         pandas DataFrame
+
+         Parameters
+         ----------
+         header: list[str]
+             Sequence of column names to assign to the DataFrame. Mostly can be
+             produced from _parse_header.
+         data: str
+             Data string produced from _split_section
+         sep: str, optional
+             Delimiter to be fed into the pandas read_csv function that
+             operates on the data string, by default R"\s{2,}|\s:\s"
+         index_col: int, optional
+             Column in data to be used as DataFrame index, by default 0
+
+         Returns
+         -------
+         pd.DataFrame
+             Report data table
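+
+         Examples
+         --------
+         Parsing a tiny two-column table (illustrative):
+
+         >>> Report._parse_table(["Name", "Value"], "A  1\nB  2\n")["Value"].to_dict()
+         {'A': 1, 'B': 2}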
+         """
+
+         # remove leading spaces on each line and replace long runs of periods with spaces
+         data = re.sub(R"^\s+", "", re.sub(R"\.{2,}", " ", data), flags=re.MULTILINE)
+
+         # by default read in data with a minimum of 2 spaces, or a colon
+         # flanked by spaces, as the delimiter
+         df = read_csv(
+             filepath_or_buffer=StringIO(data),
+             header=None,
+             engine="python",
+             sep=sep,
+             index_col=index_col,
+             names=header,
+         )
+
+         # convert day and time columns into a single datetime column
+         if "Time_of_Max" in df.columns:
+             # convert time of max to timedelta
+             df["Time_of_Max"] = to_timedelta(
+                 df.pop("days").astype(int), unit="D"
+             ) + to_timedelta(
+                 df["Time_of_Max"] + ":00"
+             )  # type: ignore
+         return df
+
+     @property
+     def analysis_options(self) -> Series:
+         """
+         Pandas Series containing the analysis options listed in the
+         report file, including units, models, methods, dates, time steps, etc.
+
+         Returns
+         -------
+         Series
+             Series of options.
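+
+         Examples
+         --------
+         Looking up a single option; illustrative, and assumes the report
+         lists a ``Flow Units`` option:
+
+         >>> rpt = Report("model.rpt")  # doctest: +SKIP
+         >>> rpt.analysis_options["Flow Units"]  # doctest: +SKIP
+         'CFS'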
+         """
+         if not hasattr(self, "_analysis_options"):
+             header, data = self._split_section(self._sections["Analysis Options"])
+             df = self._parse_table(["Option", "Setting"], data)["Setting"]
+             self._analysis_options = df.dropna()
+
+         return self._analysis_options
+
+     @property
+     def runoff_quantity_continuity(self) -> DataFrame:
+         """
+         Runoff quantity continuity error table in volume and depth units.
+         System-wide error is shown in percent.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of runoff quantity continuity error table.
+         """
+         if not hasattr(self, "_runoff_quantity_continuity"):
+             header, data = self._split_section(
+                 self._sections["Runoff Quantity Continuity"]
+             )
+             # substitute spaces between words with underscores so read_fwf works;
+             # regex is used to avoid also matching newlines
+             header = self._parse_header(re.sub(R"(?<=\w)[^\S\r\n](?=\w)", "_", header))
+             self._runoff_quantity_continuity = self._parse_table(header, data)
+         return self._runoff_quantity_continuity
+
+     @property
+     def runoff_quality_continuity(self) -> DataFrame:
+         """
+         Runoff quality continuity error table in mass units for each pollutant.
+         System-wide error is shown in percent.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of runoff quality continuity error table
+         """
+         if not hasattr(self, "_runoff_quality_continuity"):
+             header, data = self._split_section(
+                 self._sections["Runoff Quality Continuity"]
+             )
+             # substitute spaces between words with underscores so read_fwf works;
+             # regex is used to avoid also matching newlines
+             header = self._parse_header(re.sub(R"(?<=\w)[^\S\r\n](?=\w)", "_", header))
+             self._runoff_quality_continuity = self._parse_table(header, data)
+         return self._runoff_quality_continuity
+
+     @property
+     def groundwater_continuity(self) -> DataFrame:
+         """
+         Groundwater quantity continuity error table in volume and depth units.
+         System-wide error is shown in percent.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of groundwater quantity continuity error table
+         """
+         if not hasattr(self, "_groundwater_continuity"):
+             header, data = self._split_section(self._sections["Groundwater Continuity"])
+             # substitute spaces between words with underscores so read_fwf works;
+             # regex is used to avoid also matching newlines
+             header = self._parse_header(re.sub(R"(?<=\w)[^\S\r\n](?=\w)", "_", header))
+             self._groundwater_continuity = self._parse_table(header, data)
+         return self._groundwater_continuity
+
+     @property
+     def flow_routing_continuity(self) -> DataFrame:
+         """
+         Flow routing continuity error table in volume units.
+         System-wide error is shown in percent.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of flow routing continuity error table
+         """
+         if not hasattr(self, "_flow_routing_continuity"):
+             header, data = self._split_section(
+                 self._sections["Flow Routing Continuity"]
+             )
+             # substitute spaces between words with underscores so read_fwf works;
+             # regex is used to avoid also matching newlines
+             header = self._parse_header(re.sub(R"(?<=\w)[^\S\r\n](?=\w)", "_", header))
+             self._flow_routing_continuity = self._parse_table(header, data)
+         return self._flow_routing_continuity
+
+     @property
+     def quality_routing_continuity(self) -> DataFrame:
+         """
+         Quality routing continuity error table in mass units.
+         System-wide error is shown in percent.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of quality routing continuity error table
+         """
+         if not hasattr(self, "_quality_routing_continuity"):
+             header, data = self._split_section(
+                 self._sections["Quality Routing Continuity"]
+             )
+             # substitute spaces between words with underscores so read_fwf works;
+             # regex is used to avoid also matching newlines
+             header = self._parse_header(re.sub(R"(?<=\w)[^\S\r\n](?=\w)", "_", header))
+             self._quality_routing_continuity = self._parse_table(header, data)
+         return self._quality_routing_continuity
+
+     @property
+     def highest_continuity_errors(self) -> DataFrame:
+         """
+         Highest continuity error table in percent.
+         This table shows the model elements with the highest
+         flow routing continuity error.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of highest continuity errors table
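+
+         Examples
+         --------
+         Flagging elements above an error threshold (illustrative):
+
+         >>> rpt = Report("model.rpt")  # doctest: +SKIP
+         >>> errs = rpt.highest_continuity_errors  # doctest: +SKIP
+         >>> errs[errs["percent_error"] > 5.0]  # doctest: +SKIP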
+         """
+         if not hasattr(self, "_highest_errors"):
+             header, data = self._split_section(
+                 self._sections["Highest Continuity Errors"]
+             )
+             df = self._parse_table(
+                 ["object_type", "name", "percent_error"], data, sep=R"\s+", index_col=1
+             )
+             df["percent_error"] = df["percent_error"].str.strip("()%").astype(float)
+             self._highest_errors = df
+         return self._highest_errors
+
+     @property
+     def time_step_critical_elements(self) -> DataFrame:
+         """
+         Time-step critical elements table in percent.
+         This table shows the model elements that were controlling
+         the model time step if a variable one was used.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of time-step critical elements table
+         """
+
+         if not hasattr(self, "_ts_critical"):
+             header, data = self._split_section(
+                 self._sections["Time-Step Critical Elements"]
+             )
+             df = self._parse_table(
+                 ["object_type", "name", "percent"], data, sep=R"\s+", index_col=1
+             )
+             df["percent"] = df["percent"].str.strip("()%").astype(float)
+             self._ts_critical = df
+         return self._ts_critical
+
+     @property
+     def highest_flow_instability_indexes(self) -> DataFrame:
+         """
+         Highest flow instability indexes.
+         This table shows the model elements that have the highest
+         flow instability.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of highest flow instability indexes table
+         """
+         if not hasattr(self, "_highest_flow_instability_indexes"):
+             header, data = self._split_section(
+                 self._sections["Highest Flow Instability Indexes"]
+             )
+             if "All links are stable" in data:
+                 data = ""
+             df = self._parse_table(
+                 ["object_type", "name", "index"], data, sep=R"\s+", index_col=1
+             )
+             df["index"] = df["index"].str.strip("()").astype(int)
+             self._highest_flow_instability_indexes = df
+         return self._highest_flow_instability_indexes
+
+     @property
+     def routing_time_step_summary(self) -> DataFrame:
+         """
+         Routing time step summary table that shows the average, minimum,
+         and maximum time steps as well as a convergence summary.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of routing time step summary table
+         """
+         if not hasattr(self, "_routing_time_step_summary"):
+             header, data = self._split_section(
+                 self._sections["Routing Time Step Summary"]
+             )
+             self._routing_time_step_summary = self._parse_table(
+                 self._parse_header(header), data, sep=R"\s+:\s+"
+             )
+         return self._routing_time_step_summary
+
+     @property
+     def runoff_summary(self) -> DataFrame:
+         """
+         Runoff summary table for each subcatchment that details rainfall,
+         run-on, evaporation, infiltration, and runoff.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of subcatchment runoff summary table
+         """
+         if not hasattr(self, "_runoff_summary"):
+             header, data = self._split_section(
+                 self._sections["Subcatchment Runoff Summary"]
+             )
+             self._runoff_summary = self._parse_table(self._parse_header(header), data)
+         return self._runoff_summary
+
+     @property
+     def groundwater_summary(self) -> DataFrame:
+         """
+         Groundwater summary table for each subcatchment that details groundwater
+         inflow, outflow, moisture, and water table.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of subcatchment groundwater summary table
+         """
+         if not hasattr(self, "_groundwater_summary"):
+             header, data = self._split_section(self._sections["Groundwater Summary"])
+             self._groundwater_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._groundwater_summary
+
+     @property
+     def washoff_summary(self) -> DataFrame:
+         """
+         Washoff summary table that details the total pollutant load
+         that was washed off of each subcatchment.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of subcatchment washoff summary table
+         """
+         if not hasattr(self, "_washoff_summary"):
+             header, data = self._split_section(
+                 self._sections["Subcatchment Washoff Summary"]
+             )
+             self._washoff_summary = self._parse_table(self._parse_header(header), data)
+         return self._washoff_summary
+
+     @property
+     def node_depth_summary(self) -> DataFrame:
+         """
+         Node depth summary table that details the average and maximum
+         depth and HGL simulated for each node.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of node depth summary table
+         """
+         if not hasattr(self, "_node_depth_summary"):
+             header, data = self._split_section(self._sections["Node Depth Summary"])
+             self._node_depth_summary = self._parse_table(
+                 self._parse_header(header), data, sep=R"\s{1,}|\s:\s"
+             )
+         return self._node_depth_summary
+
+     @property
+     def node_inflow_summary(self) -> DataFrame:
+         """
+         Node inflow summary table that details the maximum inflow rates, total
+         inflow volumes, and flow balance error percent for each node.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of node inflow summary table
+         """
+         if not hasattr(self, "_node_inflow_summary"):
+             header, data = self._split_section(self._sections["Node Inflow Summary"])
+
+             self._node_inflow_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._node_inflow_summary
+
+     @property
+     def node_surchage_summary(self) -> DataFrame:
+         """
+         Node surcharge summary that details the maximum surcharge level and
+         duration of surcharge for each node.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of node surcharge summary table
+         """
+         if not hasattr(self, "_node_surcharge_summary"):
+             header, data = self._split_section(self._sections["Node Surcharge Summary"])
+
+             self._node_surcharge_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._node_surcharge_summary
+
+     @property
+     def node_flooding_summary(self) -> DataFrame:
+         """
+         Node flooding summary that details the maximum ponded depth, peak
+         flooding rate, total flood volume, and total flood duration for
+         each node.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of node flooding summary table
+         """
+         if not hasattr(self, "_node_flooding_summary"):
+             header, data = self._split_section(self._sections["Node Flooding Summary"])
+
+             self._node_flooding_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._node_flooding_summary
+
+     @property
+     def storage_volume_summary(self) -> DataFrame:
+         """
+         Storage volume summary that details the frequency of filling, average
+         and peak volumes, losses, and outflow rate for each storage unit.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of storage volume summary table
+         """
+         if not hasattr(self, "_storage_volume_summary"):
+             header, data = self._split_section(self._sections["Storage Volume Summary"])
+             header = header.replace("Storage Unit", "Storage ")
+             self._storage_volume_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._storage_volume_summary
+
+     @property
+     def outfall_loading_summary(self) -> DataFrame:
+         """
+         Outfall loading summary that details the flow frequency, average and
+         peak flow rates, total outflow volume, and pollutant mass loads for
+         each outfall.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of outfall loading summary table
+         """
+         if not hasattr(self, "_outfall_loading_summary"):
+             header, data = self._split_section(
+                 self._sections["Outfall Loading Summary"]
+             )
+             header = header.replace("Outfall Node", "Outfall ")
+             self._outfall_loading_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._outfall_loading_summary
+
+     @property
+     def link_flow_summary(self) -> DataFrame:
+         """
+         Link flow summary that details the peak flow, velocity, depth, and
+         capacity for each link.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of link flow summary table
+         """
+         if not hasattr(self, "_link_flow_summary"):
+             header, data = self._split_section(self._sections["Link Flow Summary"])
+             header = header.replace("|", " ")
+             self._link_flow_summary = self._parse_table(
+                 self._parse_header(header), data, sep=R"\s{1,}|\s:\s"
+             )
+         return self._link_flow_summary
+
+     @property
+     def flow_classification_summary(self) -> DataFrame:
+         """
+         Flow classification summary that details the amount of conduit
+         lengthening during the simulation and the fraction of simulation time
+         that each conduit spends in dry, subcritical, supercritical, or
+         critical flow.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of flow classification summary table
+         """
+         if not hasattr(self, "_flow_classification_summary"):
+             header, data = self._split_section(
+                 self._sections["Flow Classification Summary"]
+             )
+             to_remove = "---------- Fraction of Time in Flow Class ----------"
+             to_replace = " "
+             header = header.replace(to_remove, to_replace)
+             self._flow_classification_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._flow_classification_summary
+
+     @property
+     def conduit_surcharge_summary(self) -> DataFrame:
+         """
+         Conduit surcharge summary that details the hours of surcharging and
+         capacity-limited conditions.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of conduit surcharge summary table
+         """
+         if not hasattr(self, "_conduit_surcharge_summary"):
+             header, data = self._split_section(
+                 self._sections["Conduit Surcharge Summary"]
+             )
+             to_remove = "--------- Hours Full --------"
+             to_replace = "HrsFull HoursFull HrsFull "
+             header = header.replace(to_remove, to_replace)
+             self._conduit_surcharge_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._conduit_surcharge_summary
+
+     @property
+     def pumping_summary(self) -> DataFrame:
+         """
+         Pumping summary that details the utilization, peak flow rates, total
+         flow volume, power usage, and time off the pump curve for each pump.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of pumping summary table
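+
+         Examples
+         --------
+         Finding pumps that spent time off their curve; illustrative, using
+         the off-curve columns assigned by this property:
+
+         >>> rpt = Report("model.rpt")  # doctest: +SKIP
+         >>> ps = rpt.pumping_summary  # doctest: +SKIP
+         >>> ps[ps["Percent_Time_Off_Pump_Curve_High"] > 0]  # doctest: +SKIP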
+         """
+         if not hasattr(self, "_pumping_summary"):
+             header, data = self._split_section(self._sections["Pumping Summary"])
+             header = self._parse_header(header)
+             header[-1] = "Percent_Time_Off_Pump_Curve_Low"
+             header.append("Percent_Time_Off_Pump_Curve_High")
+             self._pumping_summary = self._parse_table(header, data)
+         return self._pumping_summary
+
+     @property
+     def link_pollutant_load_summary(self) -> DataFrame:
+         """
+         Link pollutant load summary that details the total pollutant mass
+         discharged from each link.
+
+         Returns
+         -------
+         pd.DataFrame
+             DataFrame of link pollutant load summary table
+         """
+         if not hasattr(self, "_link_pollutant_load_summary"):
+             header, data = self._split_section(
+                 self._sections["Link Pollutant Load Summary"]
+             )
+
+             self._link_pollutant_load_summary = self._parse_table(
+                 self._parse_header(header), data
+             )
+         return self._link_pollutant_load_summary
+
+     @property
+     def analysis_begun(self) -> Timestamp:
+         """
+         Date and time when the simulation was started
+
+         Returns
+         -------
+         Timestamp
+             Simulation start time
+
+         Raises
+         ------
+         Exception
+             If the analysis begun text could not be found in the report file
+         """
+         if not hasattr(self, "_analysis_begun"):
+             pattern = R"\s+Analysis begun on:\s+([^\n]+)$"
+             s = re.search(pattern, self._rpt_text, flags=re.MULTILINE)
+             if s:
+                 self._analysis_begun = to_datetime(s.group(1))
+             else:
+                 raise Exception("Error finding analysis begun")
+         return self._analysis_begun
+
+     @property
+     def analysis_end(self) -> Timestamp:
+         """
+         Date and time when the simulation ended
+
+         Returns
+         -------
+         Timestamp
+             Simulation end time
+
+         Raises
+         ------
+         Exception
+             If the analysis ended text could not be found in the report file
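+
+         Examples
+         --------
+         Wall-clock run time of the simulation (illustrative):
+
+         >>> rpt = Report("model.rpt")  # doctest: +SKIP
+         >>> rpt.analysis_end - rpt.analysis_begun  # doctest: +SKIP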
+         """
+         if not hasattr(self, "_analysis_end"):
+             pattern = R"\s+Analysis ended on:\s+([^\n]+)$"
+             s = re.search(pattern, self._rpt_text, flags=re.MULTILINE)
+             if s:
+                 self._analysis_end = to_datetime(s.group(1))
+             else:
+                 raise Exception("Error finding analysis end")
+         return self._analysis_end