py-ewr 2.0.0__py3-none-any.whl → 2.1.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- py_ewr/data_inputs.py +6 -217
- py_ewr/evaluate_EWRs.py +107 -965
- py_ewr/model_metadata/SiteID_MDBA.csv +80 -15
- py_ewr/observed_handling.py +3 -9
- py_ewr/parameter_metadata/ewr_calc_config.json +2 -2
- py_ewr/parameter_metadata/parameter_sheet.csv +961 -1098
- py_ewr/scenario_handling.py +53 -58
- py_ewr/summarise_results.py +1 -3
- py_ewr-2.1.2.dist-info/METADATA +222 -0
- py_ewr-2.1.2.dist-info/RECORD +15 -0
- {py_ewr-2.0.0.dist-info → py_ewr-2.1.2.dist-info}/WHEEL +1 -1
- py_ewr/climate_data/climate_cats.csv +0 -212
- py_ewr/climate_data/climate_cats_10000year.csv +0 -10003
- py_ewr-2.0.0.dist-info/METADATA +0 -190
- py_ewr-2.0.0.dist-info/RECORD +0 -17
- {py_ewr-2.0.0.dist-info → py_ewr-2.1.2.dist-info}/LICENSE +0 -0
- {py_ewr-2.0.0.dist-info → py_ewr-2.1.2.dist-info}/top_level.txt +0 -0
py_ewr/scenario_handling.py
CHANGED
```diff
@@ -16,31 +16,29 @@ log.addHandler(logging.NullHandler())
 from . import data_inputs, evaluate_EWRs, summarise_results
 #----------------------------------- Scenario testing handling functions--------------------------#
 
-def gauge_only_column(df: pd.DataFrame) -> pd.DataFrame:
-    '''Ingesting scenario file locations with a standard timeseries format,
-    returns a dictionary of flow dataframes with their associated header data
+# def gauge_only_column(df: pd.DataFrame) -> pd.DataFrame:
+#     '''Ingesting scenario file locations with a standard timeseries format,
+#     returns a dictionary of flow dataframes with their associated header data
 
-    Args:
-        csv_file (str): location of model file
-
-    Results:
-        pd.DataFrame: model file converted to dataframe
+#     Args:
+#         csv_file (str): location of model file
 
-    '''
+#     Results:
+#         pd.DataFrame: model file converted to dataframe
+#     '''
 
-    siteList = []
-    for location in df.columns:
-        gauge = extract_gauge_from_string(location)
-        siteList.append(gauge)
-    # Save over the top of the column headings with the new list containing only the gauges
-    df.columns = siteList
+#     siteList = []
+#     for location in df.columns:
+#         gauge = extract_gauge_from_string(location)
+#         siteList.append(gauge)
+#     # Save over the top of the column headings with the new list containing only the gauges
+#     df.columns = siteList
 
-    return df
+#     return df
 
 
 def unpack_model_file(csv_file: str, main_key: str, header_key: str) -> tuple:
-    '''Ingesting scenario file locations of model files with all formats (excluding
+    '''Ingesting scenario file locations of model files with all formats (excluding standard timeseries format), seperates the flow data and header data
     returns a dictionary of flow dataframes with their associated header data
 
     Args:
```
```diff
@@ -67,6 +65,7 @@ def unpack_model_file(csv_file: str, main_key: str, header_key: str) -> tuple:
         for row in cr:
             if row[0].startswith(line):
                 headerVal = pos
+                break
             pos = pos + 1
         if main_key == 'Dy':
             df = pd.read_csv(url, header=headerVal, dtype={'Dy':'int', 'Mn': 'int', 'Year': 'int'}, skiprows=range(headerVal+1, headerVal+2))
@@ -86,6 +85,7 @@ def unpack_model_file(csv_file: str, main_key: str, header_key: str) -> tuple:
         for row in csv_reader:
             if row[0].startswith(line):
                 headerVal = line_count
+                break
             line_count = line_count + 1
         if main_key == 'Dy':
             df = pd.read_csv(file, header=headerVal, dtype={'Dy':'int', 'Mn': 'int', 'Year': 'int'}, skiprows=range(headerVal+1, headerVal+2))
@@ -106,6 +106,7 @@ def unpack_model_file(csv_file: str, main_key: str, header_key: str) -> tuple:
         for row in cr:
             if row[0].startswith(line):
                 headerVal = pos
+                break
             pos = pos + 1
         junkRows = headerVal # Junk rows because rows prior to this value will be discarded
         df = pd.read_csv(url, header=headerVal, nrows = (endLine-junkRows-1), dtype={'Site':'str', 'Measurand': 'str', 'Quality': 'str'})
@@ -124,9 +125,9 @@ def unpack_model_file(csv_file: str, main_key: str, header_key: str) -> tuple:
         for row in csv_reader:
             if row[0].startswith(line):
                 headerVal = line_count
-
                 # Then get column length:
                 num_cols = num_cols = list(range(0,len(row),1))
+                break
 
             line_count = line_count + 1
         junkRows = headerVal # Junk rows because rows prior to this value will be discarded
```
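The four hunks above apply the same micro-fix: once the header row is located, `break` stops the scan instead of testing every remaining row, and it also prevents a later matching row from overwriting `headerVal`. A standalone sketch of the pattern with illustrative data, not the package's own code:

```python
import csv
import io

# Illustrative model-file content: two junk rows, then the header row.
raw = "junk\nmore junk\nField,Site,Measurand\n1,425010,Q\n"

headerVal = None
for pos, row in enumerate(csv.reader(io.StringIO(raw))):
    if row and row[0].startswith('Field'):  # 'Field' plays the role of header_key here
        headerVal = pos
        break  # stop scanning once the header row is found

print(headerVal)  # -> 2
```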
```diff
@@ -246,7 +247,7 @@ def cleaner_NSW(input_df: pd.DataFrame) -> pd.DataFrame:
 
     return cleaned_df
 
-def cleaner_IQQM_10000yr(input_df: pd.DataFrame, ewr_table_path: str = None) -> pd.DataFrame:
+def cleaner_standard_timeseries(input_df: pd.DataFrame, ewr_table_path: str = None) -> pd.DataFrame:
     '''Ingests dataframe, removes junk columns, fixes date, allocates gauges to either flow/level
 
     Args:
@@ -274,37 +275,31 @@ def cleaner_IQQM_10000yr(input_df: pd.DataFrame, ewr_table_path: str = None) ->
     date_range = pd.period_range(date_start, date_end, freq = 'D')
     cleaned_df['Date'] = date_range
     cleaned_df = cleaned_df.set_index('Date')
-
-    # Split gauges into flow and level, allocate to respective dataframe
-    flow_gauges = data_inputs.get_gauges('flow gauges',ewr_table_path)
-    level_gauges = data_inputs.get_gauges('level gauges', ewr_table_path)
+
     df_flow = pd.DataFrame(index = cleaned_df.index)
     df_level = pd.DataFrame(index = cleaned_df.index)
+
     for gauge in cleaned_df.columns:
-        …
+        gauge_only = extract_gauge_from_string(gauge)
+        if 'flow' in gauge:
+            df_flow[gauge_only] = cleaned_df[gauge].copy(deep=True)
+        if 'level' in gauge:
+            df_level[gauge_only] = cleaned_df[gauge].copy(deep=True)
+        if not gauge_only:
+            log.info('Could not identify gauge in column name:', gauge, ', skipping analysis of data in this column.')
     return df_flow, df_level
 
 def extract_gauge_from_string(input_string: str) -> str:
-    '''Takes in a
+    '''Takes in a strings, pulls out the gauge number from this string
 
     Args:
         input_string (str): string which may contain a gauge number
 
     Returns:
         str: Gauge number as a string if found, None if not found
-
     '''
-    …
-    for i in found:
-        if len(i) >= 6:
-            gauge = i
-            return gauge
-        else:
-            return None
+    gauge = input_string.split('_')[0]
+    return gauge
```
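`extract_gauge_from_string` previously scanned a list of candidate tokens (`found`) and returned the first one at least six characters long; it now takes everything before the first underscore, matching the `<gauge>_flow` / `<gauge>_level` column convention used by `cleaner_standard_timeseries`. A standalone sketch of the new behaviour (gauge numbers are illustrative):

```python
# New behaviour: the gauge number is whatever precedes the first underscore.
def extract_gauge_from_string(input_string: str) -> str:
    gauge = input_string.split('_')[0]
    return gauge

print(extract_gauge_from_string('425010_flow'))   # -> '425010'
print(extract_gauge_from_string('425010_level'))  # -> '425010'
```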
```diff
@@ -372,12 +367,10 @@ def any_cllmm_to_process(gauge_results: dict)->bool:
 
 class ScenarioHandler:
 
-    def __init__(self, …
+    def __init__(self, scenario_file: str, model_format:str, parameter_sheet:str = None,
                  calc_config_path:str = None):
-        self.…
+        self.scenario_file = scenario_file
         self.model_format = model_format
-        self.allowance = allowance
-        self.climate = climate
         self.yearly_events = None
         self.pu_ewr_statistics = None
         self.summary_results = None
@@ -389,16 +382,21 @@ class ScenarioHandler:
     def _get_file_names(self, loaded_files):
 
         file_locations = {}
-        for file in loaded_files:
-            …
+        # for file in loaded_files:
+        if '/' in loaded_files:
+            full_name = loaded_files.split('/')
+        elif ('\\' in loaded_files):
+            full_name = loaded_files.split('\\')
+        else:
+            full_name = loaded_files
+        name_exclude_extension = full_name[-1].split('.csv')[0]
+        file_locations[str(name_exclude_extension)] = loaded_files
 
         return file_locations
 
     def process_scenarios(self):
 
-        scenarios = self._get_file_names(self.…
+        scenarios = self._get_file_names(self.scenario_file)
 
         # Analyse all scenarios for EWRs
         detailed_results = {}
@@ -406,16 +404,15 @@ class ScenarioHandler:
         for scenario in tqdm(scenarios, position = 0, leave = True,
                              bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}',
                              desc= 'Evaluating scenarios'):
-
             if self.model_format == 'Bigmod - MDBA':
                 data, header = unpack_model_file(scenarios[scenario], 'Dy', 'Field')
                 data = build_MDBA_columns(data, header)
                 df_clean = cleaner_MDBA(data)
                 df_F, df_L = match_MDBA_nodes(df_clean, data_inputs.get_MDBA_codes(), self.parameter_sheet)
 
-            elif self.model_format == '…
-                …
-                df_F, df_L = …
+            elif self.model_format == 'Standard time-series':
+                df = pd.read_csv(scenarios[scenario], index_col = 'Date')
+                df_F, df_L = cleaner_standard_timeseries(df, self.parameter_sheet)
 
             elif self.model_format == 'Source - NSW (res.csv)':
                 data, header = unpack_model_file(scenarios[scenario], 'Date', 'Field')
@@ -429,20 +426,18 @@ class ScenarioHandler:
             EWR_table, bad_EWRs = data_inputs.get_EWR_table(self.parameter_sheet)
             calc_config = data_inputs.get_ewr_calc_config(self.calc_config_path)
             for gauge in all_locations:
-                gauge_results[gauge], gauge_events[gauge] = evaluate_EWRs.calc_sorter(df_F, df_L, gauge,
-                                                                                      EWR_table, calc_config)
-
+                gauge_results[gauge], gauge_events[gauge] = evaluate_EWRs.calc_sorter(df_F, df_L, gauge,
+                                                                                      EWR_table, calc_config)
             detailed_results[scenario] = gauge_results
-
+            #print(detailed_results)
             detailed_events[scenario] = gauge_events
-
+            #print(detailed_events)
             self.pu_ewr_statistics = detailed_results
             self.yearly_events = detailed_events
 
             self.flow_data = df_F
             self.level_data = df_L
-
     def get_all_events(self)-> pd.DataFrame:
 
         if not self.yearly_events:
@@ -561,7 +556,7 @@ class ScenarioHandler:
         yearly_ewr_results = summarise_results.join_ewr_parameters(cols_to_add=['Multigauge'],
                                                                    left_table=yearly_ewr_results,
                                                                    left_on=['gauge','pu','ewrCode'],
-                                                                   selected_columns= ['Year', 'eventYears', 'numAchieved', 'numEvents', 'numEventsAll',
+                                                                   selected_columns= ['Year', 'eventYears', 'numAchieved', 'numEvents', 'numEventsAll',
                                                                    'eventLength', 'eventLengthAchieved', 'totalEventDays', 'totalEventDaysAchieved',
                                                                    'maxEventDays', 'maxRollingEvents', 'maxRollingAchievement',
                                                                    'missingDays', 'totalPossibleDays', 'ewrCode',
```
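With multiple-scenario input gone, `_get_file_names` now receives a single path string rather than a list, and derives the scenario label from the file name (directories and the `.csv` extension stripped). A standalone `pathlib` sketch of the same derivation, not the package's code, which splits on `/` and `\` by hand:

```python
from pathlib import PureWindowsPath

def scenario_label(scenario_file: str) -> dict:
    # PureWindowsPath accepts both '/' and '\\' separators, mirroring the
    # two split branches in ScenarioHandler._get_file_names.
    name = PureWindowsPath(scenario_file).name.split('.csv')[0]
    return {name: scenario_file}

print(scenario_label('model_runs/Scenario1.csv'))
# -> {'Scenario1': 'model_runs/Scenario1.csv'}
```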
py_ewr/summarise_results.py
CHANGED
```diff
@@ -630,7 +630,7 @@ def get_rolling_max_interEvents(df:pd.DataFrame, start_date: date, end_date: dat
                 master_dict[scenario][gauge][pu] = {}
                 if ewr not in master_dict[scenario][gauge][pu]:
                     master_dict[scenario][gauge][pu][ewr] = evaluate_EWRs.construct_event_dict(unique_years)
-                # Pull EWR start and end date from EWR dataset and clean
+                # Pull EWR start and end date from EWR dataset and clean
                 EWR_info = {}
                 EWR_info['start_date'] = data_inputs.ewr_parameter_grabber(EWR_table, gauge, pu, ewr, 'StartMonth')
                 EWR_info['end_date'] = data_inputs.ewr_parameter_grabber(EWR_table, gauge, pu, ewr, 'EndMonth')
@@ -647,8 +647,6 @@ def get_rolling_max_interEvents(df:pd.DataFrame, start_date: date, end_date: dat
                 else:
                     EWR_info['end_day'] = None
                     EWR_info['end_month'] =int(EWR_info['end_date'])
-                # if ewr == "LF2_WP":
-                # if unique_EWR == "big10602.bmdTEMPORARY_ID_SPLIT425010TEMPORARY_ID_SPLITMurray River - Lock 10 to Lock 9TEMPORARY_ID_SPLITLF2_WP":
 
                 # Iterate over the interevent periods for this EWR
                 for i, row in df_subset.iterrows():
```
py_ewr-2.1.2.dist-info/METADATA
ADDED
@@ -0,0 +1,222 @@

Metadata-Version: 2.1
Name: py_ewr
Version: 2.1.2
Summary: Environmental Water Requirement calculator
Home-page: https://github.com/MDBAuth/EWR_tool
Author: Martin Job
Author-email: Martin.Job@mdba.gov.au
Project-URL: Bug Tracker, https://github.com/MDBAuth/EWR_tool/issues
Classifier: Programming Language :: Python :: 3
Classifier: License :: OSI Approved :: GNU General Public License (GPL)
Classifier: Operating System :: OS Independent
Classifier: Development Status :: 4 - Beta
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Framework :: Pytest
Description-Content-Type: text/markdown
License-File: LICENSE
Requires-Dist: ipython ==8.8.0
Requires-Dist: ipywidgets ==7.7.0
Requires-Dist: pandas ==2.0.3
Requires-Dist: requests ==2.25.1
Requires-Dist: tqdm ==4.64.0
Requires-Dist: mdba-gauge-getter ==0.5.1
Requires-Dist: cachetools ==5.2.0

[py-ewr on PyPI](https://pypi.org/project/py-ewr/) | [DOI](https://zenodo.org/badge/latestdoi/342122359)

### **EWR tool version 2.1.2 README**

### **Notes on recent updates**
- The ability to pass a climate categorisation file has been removed. No EWRs require climate categorisation anymore, so the feature was redundant.
- The ability to pass an allowance that adjusted parts of the parameter sheet has been removed. The feature was not transparent and produced counter-intuitive results in many cases. If you want to assess partial success, download the parameter sheet, make the required modifications yourself (e.g. multiply the duration column by a factor of 0.9 to simulate a 10% allowance), and run the EWR tool with the modified sheet - see the sketch after this list.
- The ability to pass multiple model scenarios to the package has been removed, because a single scenario may consist of several different files and handling one file per call was the cleanest way to account for this. To run multiple scenarios, call the EWR tool package in a loop, as in the example under Option 2 below.
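A minimal sketch of that workaround, assuming the duration requirement lives in a column named `Duration` in your copy of the parameter sheet (check the sheet and adjust the column name if it differs):

```python
import pandas as pd

# Load a local copy of the parameter sheet (path is illustrative).
params = pd.read_csv('parameter_sheet.csv')

# Simulate a 10% allowance by relaxing the duration requirement.
# 'Duration' is an assumed column name -- verify it against your sheet.
params['Duration'] = params['Duration'] * 0.9

params.to_csv('parameter_sheet_90pct.csv', index=False)

# Then pass the modified sheet to the tool, e.g.
# ScenarioHandler(..., parameter_sheet='parameter_sheet_90pct.csv')
```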
### **Installation**

Note - requires Python 3.8 or newer

Step 1.
Upgrade pip
```bash
python -m pip install --upgrade pip
```

Step 2.
```bash
pip install py-ewr
```

### Option 1: Running the observed mode of the tool
The EWR tool uses a companion program, gauge getter, to download the river data for the selected gauge locations and dates, and then runs that data through the EWR tool.

```python
from datetime import datetime

#USER INPUT REQUIRED>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

dates = {'start_date': datetime(YYYY, 7, 1),
         'end_date': datetime(YYYY, 6, 30)}

gauges = ['Gauge1', 'Gauge2']

# END USER INPUT<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
```

```python
from py_ewr.observed_handling import ObservedHandler

# Running the EWR tool:
ewr_oh = ObservedHandler(gauges=gauges, dates=dates)

# Generating tables:
# Table 1: Summarised EWR results for the entire timeseries
ewr_results = ewr_oh.get_ewr_results()

# Table 2: Summarised EWR results, aggregated to water years:
yearly_ewr_results = ewr_oh.get_yearly_ewr_results()

# Table 3: All events details regardless of duration
all_events = ewr_oh.get_all_events()

# Table 4: Inverse of Table 3 showing the interevent periods
all_interEvents = ewr_oh.get_all_interEvents()

# Table 5: All events details that also meet the duration requirement:
all_successfulEvents = ewr_oh.get_all_successful_events()

# Table 6: Inverse of Table 5 showing the interevent periods:
all_successful_interEvents = ewr_oh.get_all_successful_interEvents()
```
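Each getter returns a pandas DataFrame, so the observed-mode tables can be written straight to disk in the same way the scenario example below does; a short sketch (file names are illustrative):

```python
# Write the observed-mode tables to CSV (file names are illustrative).
ewr_results.to_csv('observed_ewr_results.csv')
yearly_ewr_results.to_csv('observed_yearly_ewr_results.csv')
all_events.to_csv('observed_all_events.csv')
```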
### Option 2: Running model scenarios through the EWR tool

1. Tell the tool where the model files are (they can be local or in a remote location)
2. Tell the tool what format the model files are in (current model format options: 'Bigmod - MDBA', 'Source - NSW (res.csv)', 'IQQM - NSW 10,000 years' - see the manual for formatting requirements)

```python
#USER INPUT REQUIRED>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

# Minimum 1 scenario and 1 related file required
scenarios = {'Scenario1': ['file/location/1', 'file/location/2', 'file/location/3'],
             'Scenario2': ['file/location/1', 'file/location/2', 'file/location/3']}

model_format = 'Bigmod - MDBA'

# END USER INPUT<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
```

```python
from py_ewr.scenario_handling import ScenarioHandler
import pandas as pd

ewr_results_dict = {}
yearly_results_dict = {}
all_events_dict = {}
all_interEvents_dict = {}
all_successful_Events_dict = {}
all_successful_interEvents_dict = {}

for scenario_name, scenario_list in scenarios.items():
    ewr_results = pd.DataFrame()
    yearly_ewr_results = pd.DataFrame()
    all_events = pd.DataFrame()
    all_interEvents = pd.DataFrame()
    all_successful_Events = pd.DataFrame()
    all_successful_interEvents = pd.DataFrame()

    for file in scenario_list:
        # Running the EWR tool:
        ewr_sh = ScenarioHandler(scenario_file = file,
                                 model_format = model_format)

        # Return each table and stitch the different files of the same scenario together:
        # Table 1: Summarised EWR results for the entire timeseries
        temp_ewr_results = ewr_sh.get_ewr_results()
        ewr_results = pd.concat([ewr_results, temp_ewr_results], axis = 0)
        # Table 2: Summarised EWR results, aggregated to water years:
        temp_yearly_ewr_results = ewr_sh.get_yearly_ewr_results()
        yearly_ewr_results = pd.concat([yearly_ewr_results, temp_yearly_ewr_results], axis = 0)
        # Table 3: All events details regardless of duration
        temp_all_events = ewr_sh.get_all_events()
        all_events = pd.concat([all_events, temp_all_events], axis = 0)
        # Table 4: Inverse of Table 3 showing the interevent periods
        temp_all_interEvents = ewr_sh.get_all_interEvents()
        all_interEvents = pd.concat([all_interEvents, temp_all_interEvents], axis = 0)
        # Table 5: All events details that also meet the duration requirement:
        temp_all_successfulEvents = ewr_sh.get_all_successful_events()
        all_successful_Events = pd.concat([all_successful_Events, temp_all_successfulEvents], axis = 0)
        # Table 6: Inverse of Table 5 showing the interevent periods:
        temp_all_successful_interEvents = ewr_sh.get_all_successful_interEvents()
        all_successful_interEvents = pd.concat([all_successful_interEvents, temp_all_successful_interEvents], axis = 0)

    # Optional code to output results to csv files:
    ewr_results.to_csv(scenario_name + 'all_results.csv')
    yearly_ewr_results.to_csv(scenario_name + 'yearly_ewr_results.csv')
    all_events.to_csv(scenario_name + 'all_events.csv')
    all_interEvents.to_csv(scenario_name + 'all_interevents.csv')
    all_successful_Events.to_csv(scenario_name + 'all_successful_Events.csv')
    all_successful_interEvents.to_csv(scenario_name + 'all_successful_interEvents.csv')

    # Save the final tables to the dictionaries:
    ewr_results_dict[scenario_name] = ewr_results
    yearly_results_dict[scenario_name] = yearly_ewr_results
    all_events_dict[scenario_name] = all_events
    all_interEvents_dict[scenario_name] = all_interEvents
    all_successful_Events_dict[scenario_name] = all_successful_Events
    all_successful_interEvents_dict[scenario_name] = all_successful_interEvents
```
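If each scenario's files live in their own sub-folder, the `scenarios` dictionary above can be built programmatically rather than typed out; a minimal sketch, assuming a layout of one folder of CSV files per scenario (the `model_runs` path is illustrative):

```python
# Build the scenarios dict from a layout like:
#   model_runs/Scenario1/*.csv, model_runs/Scenario2/*.csv
from pathlib import Path

scenarios = {d.name: [str(f) for f in sorted(d.glob('*.csv'))]
             for d in Path('model_runs').iterdir() if d.is_dir()}
```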
### **Purpose**
This tool has two purposes:
1. Operational: Tracking EWR success at gauges of interest in real time - option 1 above.
2. Planning: Comparing EWR success between scenarios (i.e. model runs) - option 2 above.

**Support**
For issues relating to the script, a tutorial, or feedback, please contact Lara Palmer at lara.palmer@mdba.gov.au, Martin Job at martin.job@mdba.gov.au, or Joel Bailey at joel.bailey@mdba.gov.au.

**Disclaimer**
Every effort has been made to ensure the EWR database represents the original EWRs from state long term water plans as closely as possible, and that the code within this tool interprets and analyses these EWRs accurately. However, there may still be unresolved bugs in the EWR parameter sheet and/or EWR tool. Please report any bugs under the issues tab of the GitHub project so we can investigate further.

**Notes on development of the dataset of EWRs**
The MDBA has worked with Basin state representatives to ensure the scientific integrity of EWRs has been maintained when translating from the raw EWRs in the Basin state Long Term Water Plans (LTWPs) to the machine-readable format found in the parameter sheet within this tool.

**Compatibility**

- All Queensland catchments
- All New South Wales catchments
- All South Australian catchments
- All EWRs from river based Environmental Water Management Plans (EWMPs) in Victoria*

*Currently the wetland EWMPs and mixed wetland-river EWMPs in Victoria contain EWRs that cannot be evaluated by an automated EWR tool, so the EWRs from these plans have been left out for now. The MDBA will work with our Victorian colleagues to ensure any updated EWRs in these plans are integrated into the tool where possible.

**Input data**

- Gauge data from the relevant Basin state websites and the Bureau of Meteorology website
- Scenario data input by the user
- Model metadata for location association between gauge IDs and model nodes
- EWR parameter sheet

**Running the tool**

Consult the user manual for instructions on how to run the tool. Please email the addresses above for a copy of the user manual.
py_ewr-2.1.2.dist-info/RECORD
ADDED
@@ -0,0 +1,15 @@

```
py_ewr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
py_ewr/data_inputs.py,sha256=BLJrrasZYpSUkC0yVBzVqnz6WGT2Y3SJYJJhguDi-Vo,18046
py_ewr/evaluate_EWRs.py,sha256=1JYWNtw5MYgf92jXhDNlgGkDx2mcfBP34OO-0jgal1w,229279
py_ewr/observed_handling.py,sha256=NaySsqe8nWgZdDRSr22EYmCwf0uNzVj9DFF6hzNr6ws,18173
py_ewr/scenario_handling.py,sha256=QfUTpFr4uylTcoIDEjQJi2YZfydQR1I0ayorGJKFNkk,26399
py_ewr/summarise_results.py,sha256=4IsRBfWP6XQvLRU2TUrY8lIGwlD1JMpmbEkbIWUaE4k,29787
py_ewr/model_metadata/SiteID_MDBA.csv,sha256=OR2fUIwU8ei85Nq6nZxb_ozC4q9scATrnqED21PlGRM,162848
py_ewr/model_metadata/SiteID_NSW.csv,sha256=UVBxN43Z5KWCvWhQ5Rh6TNEn35q4_sjPxKyHg8wPFws,6805
py_ewr/parameter_metadata/ewr_calc_config.json,sha256=BkuSJI6NqZzJxZMd8eh3GBs5uoHfI2joOkPrKLu9-60,17556
py_ewr/parameter_metadata/parameter_sheet.csv,sha256=ufOYn59cS4kl9XF1WaNqBhodcsBxUWH8Y0g1qHVUAcg,594257
py_ewr-2.1.2.dist-info/LICENSE,sha256=ogEPNDSH0_dhiv_lT3ifVIdgIzHAqNA_SemnxUfPBJk,7048
py_ewr-2.1.2.dist-info/METADATA,sha256=a-gfT0nKlAbWRwRSx9GMnjo4GQocz4LctPEatn9eE_4,10070
py_ewr-2.1.2.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
py_ewr-2.1.2.dist-info/top_level.txt,sha256=n3725d-64Cjyb-YMUMV64UAuIflzUh2_UZSxiIbrur4,7
py_ewr-2.1.2.dist-info/RECORD,,
```