py-ewr 2.2.4__tar.gz → 2.2.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. {py_ewr-2.2.4 → py_ewr-2.2.6}/PKG-INFO +50 -26
  2. {py_ewr-2.2.4 → py_ewr-2.2.6}/README.md +41 -17
  3. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/data_inputs.py +18 -4
  4. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/evaluate_EWRs.py +76 -32
  5. py_ewr-2.2.6/py_ewr/model_metadata/SiteID_MDBA.csv +2695 -0
  6. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/observed_handling.py +6 -7
  7. py_ewr-2.2.6/py_ewr/parameter_metadata/ewr2obj.csv +40590 -0
  8. py_ewr-2.2.6/py_ewr/parameter_metadata/obj2target.csv +8962 -0
  9. py_ewr-2.2.6/py_ewr/parameter_metadata/obj2yrtarget.csv +106 -0
  10. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/parameter_metadata/parameter_sheet.csv +3459 -3447
  11. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/scenario_handling.py +83 -62
  12. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/summarise_results.py +38 -3
  13. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr.egg-info/PKG-INFO +50 -26
  14. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr.egg-info/SOURCES.txt +3 -0
  15. py_ewr-2.2.6/py_ewr.egg-info/requires.txt +8 -0
  16. {py_ewr-2.2.4 → py_ewr-2.2.6}/setup.py +10 -10
  17. {py_ewr-2.2.4 → py_ewr-2.2.6}/tests/test_data_inputs.py +14 -1
  18. {py_ewr-2.2.4 → py_ewr-2.2.6}/tests/test_evaluate_ewr_rest.py +1263 -180
  19. {py_ewr-2.2.4 → py_ewr-2.2.6}/tests/test_scenario_handling.py +207 -0
  20. py_ewr-2.2.4/py_ewr/model_metadata/SiteID_MDBA.csv +0 -2686
  21. py_ewr-2.2.4/py_ewr.egg-info/requires.txt +0 -9
  22. {py_ewr-2.2.4 → py_ewr-2.2.6}/LICENSE +0 -0
  23. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/__init__.py +0 -0
  24. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/io.py +0 -0
  25. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/model_metadata/SiteID_NSW.csv +0 -0
  26. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/model_metadata/iqqm_stations.csv +0 -0
  27. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr/parameter_metadata/ewr_calc_config.json +0 -0
  28. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr.egg-info/dependency_links.txt +0 -0
  29. {py_ewr-2.2.4 → py_ewr-2.2.6}/py_ewr.egg-info/top_level.txt +0 -0
  30. {py_ewr-2.2.4 → py_ewr-2.2.6}/pyproject.toml +0 -0
  31. {py_ewr-2.2.4 → py_ewr-2.2.6}/setup.cfg +0 -0
  32. {py_ewr-2.2.4 → py_ewr-2.2.6}/tests/test_evaluate_ewrs.py +0 -0
  33. {py_ewr-2.2.4 → py_ewr-2.2.6}/tests/test_observed_handling.py +0 -0
  34. {py_ewr-2.2.4 → py_ewr-2.2.6}/tests/test_summarise_results.py +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: py_ewr
- Version: 2.2.4
+ Version: 2.2.6
  Summary: Environmental Water Requirement calculator
  Home-page: https://github.com/MDBAuth/EWR_tool
  Author: Martin Job
@@ -12,42 +12,46 @@ Classifier: Operating System :: OS Independent
  Classifier: Development Status :: 4 - Beta
  Classifier: Programming Language :: Python
  Classifier: Programming Language :: Python :: 3
- Classifier: Programming Language :: Python :: 3.8
  Classifier: Programming Language :: Python :: 3.9
  Classifier: Programming Language :: Python :: 3.10
  Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Programming Language :: Python :: 3.13
  Classifier: Framework :: Pytest
  Description-Content-Type: text/markdown
  License-File: LICENSE
- Requires-Dist: ipython==8.8.0
- Requires-Dist: ipywidgets==7.7.0
- Requires-Dist: pandas==2.0.3
- Requires-Dist: requests==2.25.1
+ Requires-Dist: pandas>2
+ Requires-Dist: requests>2
  Requires-Dist: mdba-gauge-getter==0.5.1
- Requires-Dist: cachetools==5.2.0
- Requires-Dist: xarray==2023.01.0
- Requires-Dist: netCDF4==1.6.4
- Requires-Dist: numpy<2
+ Requires-Dist: cachetools>5
+ Requires-Dist: xarray
+ Requires-Dist: h5py
+ Requires-Dist: netCDF4
+ Requires-Dist: numpy

  [![CI](https://github.com/MDBAuth/EWR_tool/actions/workflows/test-release.yml/badge.svg)]()
  [![PyPI - Python Version](https://img.shields.io/pypi/pyversions/py-ewr)](https://pypi.org/project/py-ewr/)
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)

- ### **EWR tool version 2.2.4 README**
+ ### **EWR tool version 2.2.6 README**

- ### **Notes on recent version update**
+ ### **Notes on recent version updates**
+ - Including draft objective mapping files in the package (see the **Objective mapping** sub-heading below for more information). Objective mapping has therefore been pulled out of the parameter sheet
+ - Including an example parallel processing script for running the EWR tool
+ - Adding handling for cases where a single MDBA bigmod site ID maps to multiple different gauges
+ - Fixing SDL resource unit mapping in the parameter sheet
+ - Adding lat and lon to the parameter sheet
+ - Ten thousand year handling - this has been brought back online
  - Remove TQDM loading bars
- - Handle duplicate sites in MDBA siteID file - where a duplicate exists, the first match is used and the rest are skipped over
  - Adding new model format handling - 'IQQM - netcdf'
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
- - ten thousand year handling - This has been briefly taken offline for this version.
  - bug fixes: spells of length equal to the minimum required spell length were getting filtered out of the successful events table and successful interevents table, fixed misclassification of some gauges to flow, level, and lake level categories
  - New EWRs: New Qld EWRs - SF_FD and BF_FD used to look into the FD EWRs in closer detail.

  ### **Installation**

- Note - requires Python 3.8 or newer
+ Note - requires Python 3.9 to 3.13 (inclusive)

  Step 1.
  Upgrade pip
@@ -109,7 +113,30 @@ all_successful_interEvents = ewr_oh.get_all_successful_interEvents()
  ### Option 2: Running model scenarios through the EWR tool

  1. Tell the tool where the model files are (can either be local or in a remote location)
- 2. Tell the tool what format the model files are in (Current model format options: 'Bigmod - MDBA', 'Source - NSW (res.csv)', 'Standard time-series' - see manual for formatting requirements)
+ 2. Tell the tool what format the model files are in. The current model format options are:
+ - 'Bigmod - MDBA'
+ Bigmod formatted outputs
+ - 'Source - NSW (res.csv)'
+ Source res.csv formatted outputs
+ - 'Standard time-series' (a pandas sketch of this layout follows the list)
+ The first column header should be *Date* with the date values in the YYYY-MM-DD format.
+ The next column headers should be the *gauge*, followed by *_*, followed by either *flow* or *level*
+ E.g.
+ | Date | 409025_flow | 409025_level | 414203_flow |
+ | --- | --- | --- | --- |
+ | 1895-07-01 | 8505 | 5.25 | 8500 |
+ | 1895-07-02 | 8510 | 5.26 | 8505 |
+
+ - 'ten thousand year'
+ This has the same formatting requirements as the 'Standard time-series'. This format can handle ten thousand years' worth of hydrology data.
+ The first column header should be *Date* with the date values in the YYYY-MM-DD format.
+ The next column headers should be the *gauge*, followed by *_*, followed by either *flow* or *level*
+ E.g.
+ | Date | 409025_flow | 409025_level | 414203_flow |
+ | --- | --- | --- | --- |
+ | 105-07-01 | 8505 | 5.25 | 8500 |
+ | 105-07-02 | 8510 | 5.26 | 8505 |
+
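For illustration, a minimal pandas sketch of the 'Standard time-series' layout described above (gauge numbers taken from the example table; the reindex step mirrors the missing-date filling noted in the release notes):

```python
# Sketch of the 'Standard time-series' layout (gauge numbers illustrative).
import pandas as pd

# Columns follow the <gauge>_<flow|level> convention; note the missing 1895-07-03.
raw = pd.DataFrame({
    "Date": ["1895-07-01", "1895-07-02", "1895-07-04"],
    "409025_flow": [8505, 8510, 8520],
    "409025_level": [5.25, 5.26, 5.28],
})
raw["Date"] = pd.to_datetime(raw["Date"])
ts = raw.set_index("Date")

# Missing dates can be filled with NaN by reindexing onto a complete daily range,
# mirroring the missing-date filling described in the release notes.
full_index = pd.date_range(ts.index.min(), ts.index.max(), freq="D")
ts = ts.reindex(full_index)
print(ts)
```

The same column convention applies to the 'ten thousand year' format; only the span of dates differs.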

  ```python
  #USER INPUT REQUIRED>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
@@ -199,11 +226,11 @@ For issues relating to the script, a tutorial, or feedback please contact Lara P


  **Disclaimer**
- Every effort has been taken to ensure the EWR database represents the original EWRs from state long term water plans as best as possible, and that the code within this tool has been developed to interpret and analyse these EWRs in an accurate way. However, there may still be unresolved bugs in the EWR parameter sheet and/or EWR tool. Please report any bugs to the issues tab under the GitHub project so we can investigate further.
+ Every effort has been taken to ensure the EWR database represents the original EWRs from state Long Term Water Plans (LTWPs) and Environmental Water Management Plans (EWMPs) as best as possible, and that the code within this tool has been developed to interpret and analyse these EWRs in an accurate way. However, there may still be unresolved bugs in the EWR parameter sheet and/or EWR tool. Please report any bugs to the issues tab under the GitHub project so we can investigate further.


  **Notes on development of the dataset of EWRs**
- The MDBA has worked with Basin state representatives to ensure scientific integrity of EWRs has been maintained when translating from raw EWRs in the Basin state Long Term Water Plans (LTWPs) to the machine readable format found in the parameter sheet within this tool.
+ The MDBA has worked with Basin state representatives to ensure scientific integrity of EWRs has been maintained when translating from raw EWRs in the Basin state LTWPs and EWMPs to the machine readable format found in the parameter sheet within this tool.


  **Compatibility**

@@ -226,11 +253,8 @@ NSW:

  Consult the user manual for instructions on how to run the tool. Please email the above email addresses for a copy of the user manual.

- To disable progress bars, as for example when running remote scripted runs, use
-
- ``` python
- import os
- os.environ["TQDM_DISABLE"] = "1"
- ```
- *before* importing py-ewr in your script.
-
+ **Objective mapping**
+ Objective mapping CSV files are now included in the EWR tool package. This objective mapping is currently an early draft and will be finalised after consultation with relevant state representatives. The files are intended to be used together to link EWRs to the detailed objectives, theme level targets and specific goals. The three sheets are located in the py_ewr/parameter_metadata folder (a short loading sketch follows this list):
+ - ewr2obj.csv: For each planning unit, gauge and EWR combination there are one or more env_obj codes. These env_obj codes come under one of five theme level targets (Native fish, Native vegetation, Waterbirds, Other species or Ecosystem functions)
+ - obj2target.csv: env_obj codes are unique to their planning unit in the LTWP (noting there are often a lot of similarities between env_obj codes within the same state). The plain English wording of the environmental objectives is also contained in this CSV. The LTWP, planning unit and env_obj rows are repeated for each specific goal related to that LTWP, planning unit and env_obj.
+ - obj2yrtarget.csv: Relates the environmental objectives to 5, 10 and 20 year targets
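To make the linkage between the three files concrete, a hedged sketch of loading them from the installed package with pandas; the exact join keys are not spelled out in this diff, so the merges below rely on pandas joining on whichever columns the tables share:

```python
# Sketch only: load the packaged objective mapping files and chain them together.
# Join keys are not assumed; pd.merge joins on whichever columns the tables share.
import os
import pandas as pd
import py_ewr

meta_dir = os.path.join(os.path.dirname(py_ewr.__file__), "parameter_metadata")

ewr2obj = pd.read_csv(os.path.join(meta_dir, "ewr2obj.csv"))
obj2target = pd.read_csv(os.path.join(meta_dir, "obj2target.csv"))
obj2yrtarget = pd.read_csv(os.path.join(meta_dir, "obj2yrtarget.csv"))

# EWR -> env_obj -> specific goals / theme targets -> 5, 10 and 20 year targets
linked = ewr2obj.merge(obj2target, how="left").merge(obj2yrtarget, how="left")
print(linked.columns.tolist())
```

The `get_causal_ewr()` helper added to data_inputs.py in this release (see the data_inputs.py hunks below) wraps the same three reads into a single dictionary.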
@@ -3,20 +3,24 @@
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)

- ### **EWR tool version 2.2.4 README**
-
- ### **Notes on recent version update**
+ ### **EWR tool version 2.2.6 README**
+
+ ### **Notes on recent version updates**
+ - Including draft objective mapping files in the package (see the **Objective mapping** sub-heading below for more information). Objective mapping has therefore been pulled out of the parameter sheet
+ - Including an example parallel processing script for running the EWR tool
+ - Adding handling for cases where a single MDBA bigmod site ID maps to multiple different gauges
+ - Fixing SDL resource unit mapping in the parameter sheet
+ - Adding lat and lon to the parameter sheet
+ - Ten thousand year handling - this has been brought back online
  - Remove TQDM loading bars
- - Handle duplicate sites in MDBA siteID file - where a duplicate exists, the first match is used and the rest are skipped over
  - Adding new model format handling - 'IQQM - netcdf'
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
- - ten thousand year handling - This has been briefly taken offline for this version.
  - bug fixes: spells of length equal to the minimum required spell length were getting filtered out of the successful events table and successful interevents table, fixed misclassification of some gauges to flow, level, and lake level categories
  - New EWRs: New Qld EWRs - SF_FD and BF_FD used to look into the FD EWRs in closer detail.

  ### **Installation**

- Note - requires Python 3.8 or newer
+ Note - requires Python 3.9 to 3.13 (inclusive)

  Step 1.
  Upgrade pip
@@ -78,7 +82,30 @@ all_successful_interEvents = ewr_oh.get_all_successful_interEvents()
  ### Option 2: Running model scenarios through the EWR tool

  1. Tell the tool where the model files are (can either be local or in a remote location)
- 2. Tell the tool what format the model files are in (Current model format options: 'Bigmod - MDBA', 'Source - NSW (res.csv)', 'Standard time-series' - see manual for formatting requirements)
+ 2. Tell the tool what format the model files are in. The current model format options are:
+ - 'Bigmod - MDBA'
+ Bigmod formatted outputs
+ - 'Source - NSW (res.csv)'
+ Source res.csv formatted outputs
+ - 'Standard time-series'
+ The first column header should be *Date* with the date values in the YYYY-MM-DD format.
+ The next column headers should be the *gauge*, followed by *_*, followed by either *flow* or *level*
+ E.g.
+ | Date | 409025_flow | 409025_level | 414203_flow |
+ | --- | --- | --- | --- |
+ | 1895-07-01 | 8505 | 5.25 | 8500 |
+ | 1895-07-02 | 8510 | 5.26 | 8505 |
+
+ - 'ten thousand year'
+ This has the same formatting requirements as the 'Standard time-series'. This format can handle ten thousand years' worth of hydrology data.
+ The first column header should be *Date* with the date values in the YYYY-MM-DD format.
+ The next column headers should be the *gauge*, followed by *_*, followed by either *flow* or *level*
+ E.g.
+ | Date | 409025_flow | 409025_level | 414203_flow |
+ | --- | --- | --- | --- |
+ | 105-07-01 | 8505 | 5.25 | 8500 |
+ | 105-07-02 | 8510 | 5.26 | 8505 |
+

  ```python
  #USER INPUT REQUIRED>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
@@ -168,11 +195,11 @@ For issues relating to the script, a tutorial, or feedback please contact Lara P


  **Disclaimer**
- Every effort has been taken to ensure the EWR database represents the original EWRs from state long term water plans as best as possible, and that the code within this tool has been developed to interpret and analyse these EWRs in an accurate way. However, there may still be unresolved bugs in the EWR parameter sheet and/or EWR tool. Please report any bugs to the issues tab under the GitHub project so we can investigate further.
+ Every effort has been taken to ensure the EWR database represents the original EWRs from state Long Term Water Plans (LTWPs) and Environmental Water Management Plans (EWMPs) as best as possible, and that the code within this tool has been developed to interpret and analyse these EWRs in an accurate way. However, there may still be unresolved bugs in the EWR parameter sheet and/or EWR tool. Please report any bugs to the issues tab under the GitHub project so we can investigate further.


  **Notes on development of the dataset of EWRs**
- The MDBA has worked with Basin state representatives to ensure scientific integrity of EWRs has been maintained when translating from raw EWRs in the Basin state Long Term Water Plans (LTWPs) to the machine readable format found in the parameter sheet within this tool.
+ The MDBA has worked with Basin state representatives to ensure scientific integrity of EWRs has been maintained when translating from raw EWRs in the Basin state LTWPs and EWMPs to the machine readable format found in the parameter sheet within this tool.


  **Compatibility**

@@ -195,11 +222,8 @@ NSW:

  Consult the user manual for instructions on how to run the tool. Please email the above email addresses for a copy of the user manual.

- To disable progress bars, as for example when running remote scripted runs, use
-
- ``` python
- import os
- os.environ["TQDM_DISABLE"] = "1"
- ```
- *before* importing py-ewr in your script.
-
+ **Objective mapping**
+ Objective mapping CSV files are now included in the EWR tool package. This objective mapping is currently an early draft and will be finalised after consultation with relevant state representatives. The files are intended to be used together to link EWRs to the detailed objectives, theme level targets and specific goals. The three sheets are located in the py_ewr/parameter_metadata folder:
+ - ewr2obj.csv: For each planning unit, gauge and EWR combination there are one or more env_obj codes. These env_obj codes come under one of five theme level targets (Native fish, Native vegetation, Waterbirds, Other species or Ecosystem functions)
+ - obj2target.csv: env_obj codes are unique to their planning unit in the LTWP (noting there are often a lot of similarities between env_obj codes within the same state). The plain English wording of the environmental objectives is also contained in this CSV. The LTWP, planning unit and env_obj rows are repeated for each specific goal related to that LTWP, planning unit and env_obj.
+ - obj2yrtarget.csv: Relates the environmental objectives to 5, 10 and 20 year targets
@@ -121,7 +121,7 @@ def get_MDBA_codes() -> pd.DataFrame:
  pd.DataFrame: dataframe for linking MDBA model nodes to gauges

  '''
- metadata = pd.read_csv( BASE_PATH / 'model_metadata/SiteID_MDBA.csv', engine = 'python', dtype=str, encoding='windows-1252')
+ metadata = pd.read_csv( BASE_PATH / 'model_metadata/SiteID_MDBA.csv', engine = 'python', dtype=str)#, encoding='windows-1252')

  return metadata

@@ -392,12 +392,12 @@ def get_gauges(category: str, ewr_table_path: str = None) -> set:
  multi_gauges = get_multi_gauges('gauges')
  multi_gauges = list(multi_gauges.values())
  if category == 'all gauges':
- return set(EWR_table['Gauge'].to_list() + menindee_gauges + wp_gauges + multi_gauges)
+ return set(EWR_table['Gauge'].to_list()+menindee_gauges+wp_gauges+multi_gauges+flow_barrage_gauges+level_barrage_gauges+qld_flow_gauges+qld_level_gauges+vic_level_gauges)
  elif category == 'flow gauges':
  return set(EWR_table['Gauge'].to_list() + multi_gauges + flow_barrage_gauges + qld_flow_gauges)
  elif category == 'level gauges':
  level_gauges = EWR_table[EWR_table['FlowLevelVolume']=='L']['Gauge'].to_list()
- return set(menindee_gauges + wp_gauges + level_barrage_gauges + qld_level_gauges + level_gauges)
+ return set(menindee_gauges + wp_gauges + level_barrage_gauges + qld_level_gauges + level_gauges + vic_level_gauges)
  else:
  raise ValueError('''No gauge category sent to the "get_gauges" function''')

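A short usage sketch of the function changed above, using the category strings handled in this hunk; as of 2.2.6 the 'all gauges' set also folds in the barrage, Queensland and Victorian gauges, and 'level gauges' adds the Victorian level gauges:

```python
# Sketch: querying the gauge sets maintained by data_inputs.get_gauges.
from py_ewr import data_inputs

all_gauges = data_inputs.get_gauges("all gauges")
flow_gauges = data_inputs.get_gauges("flow gauges")
level_gauges = data_inputs.get_gauges("level gauges")

# Any other category string raises a ValueError, as in the else branch above.
print(len(all_gauges), len(flow_gauges), len(level_gauges))
```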
@@ -439,4 +439,18 @@ def gauge_groups(parameter_sheet: pd.DataFrame) -> dict:

  return flow_gauges, level_gauges, lake_level_gauges

- # def gauges_to_measurand()
+ # def gauges_to_measurand()
+
+ def get_causal_ewr() -> dict:
+
+ ewr2obj_path = os.path.join(BASE_PATH, "parameter_metadata/ewr2obj.csv")
+ obj2target_path = os.path.join(BASE_PATH, "parameter_metadata/obj2target.csv")
+ obj2yrtarget_path = os.path.join(BASE_PATH, "parameter_metadata/obj2yrtarget.csv")
+
+ causal_ewr = {
+ "ewr2obj": pd.read_csv(ewr2obj_path),
+ "obj2target": pd.read_csv(obj2target_path),
+ "obj2yrtarget":pd.read_csv(obj2yrtarget_path)
+ }
+
+ return causal_ewr
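A brief usage sketch of the new helper, assuming it is imported from `py_ewr.data_inputs` as defined above; it simply returns the three mapping tables keyed by name:

```python
# Sketch: get_causal_ewr returns the three objective-mapping tables in a dict.
from py_ewr import data_inputs

causal_ewr = data_inputs.get_causal_ewr()
for name, table in causal_ewr.items():
    # keys: 'ewr2obj', 'obj2target', 'obj2yrtarget'
    print(name, table.shape)
```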
@@ -445,10 +445,19 @@ def get_index_date(date_index:Any)-> datetime.date:
  """
  if type(date_index) == pd._libs.tslibs.timestamps.Timestamp:
  return date_index.date()
- # if type(date_index) == pd._libs.tslibs.period.Period:
- # return date_index.date()#.to_timestamp()
- else:
+ if type(date_index) == pd._libs.tslibs.period.Period:
+ date_index_str = date_index.strftime('%Y-%m-%d')
+ # For dates between the years 100 and 999 we need to add a 0 onto the date string so strptime doesnt break
+ if ((int(date_index_str.split('-')[0]) >= 100) and (int(date_index_str.split('-')[0]) < 1000)):
+ date_index_str = '0' + date_index_str
+ n = datetime.datetime.strptime(date_index_str, '%Y-%m-%d').date()
+ return n
+ if type(date_index) == str:
+ n = datetime.datetime.strptime(date_index, '%Y-%m-%d').date()
+ return n
+ if type(date_index) == datetime.date:
  return date_index
+ # return date_index #TODO: should this break? i.e. we arent expecting other date formats

  #----------------------------------- EWR handling functions --------------------------------------#

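A small self-contained check of why the zero-padding branch above exists: `strptime`'s `%Y` directive expects a four-digit year, so three-digit year strings for dates in the 100-999 range fail to parse until a leading zero is added (values illustrative):

```python
# Sketch: why get_index_date zero-pads three-digit years before calling strptime.
import datetime

raw = "105-07-01"  # the kind of string a year-105 date can serialise to
year = int(raw.split("-")[0])
if 100 <= year < 1000:
    raw = "0" + raw  # strptime's %Y wants a four-digit year: '0105-07-01'
parsed = datetime.datetime.strptime(raw, "%Y-%m-%d").date()
print(parsed)  # 0105-07-01
```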
@@ -1958,7 +1967,8 @@ def water_stability_check(EWR_info:Dict, iteration:int, flows:List, all_events:D
  if levels_are_stable:
  # record event opportunity for the next n days for the total period of (EggDaysSpell)+ larvae (LarvaeDaysSpell)
  # if the last day of the event is not over the last day of the event window
- iteration_date = flow_date.date()#flow_date.to_timestamp().date()
+ iteration_date = get_index_date(flow_date)
+ # iteration_date = flow_date.date()#flow_date.to_timestamp().date()
  last_day_window = get_last_day_of_window(iteration_date, EWR_info['end_month'])
  event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
  if is_date_in_window(iteration_date, last_day_window, event_size):
@@ -1995,7 +2005,8 @@ def water_stability_level_check(EWR_info:Dict, iteration:int, all_events:Dict, w
  if levels_are_stable:
  # record event opportunity for the next n days for the total period of (EggDaysSpell)+ larvae (LarvaeDaysSpell)
  # if the last day of the event is not over the last day of the event window
- iteration_date = flow_date.date()#flow_date.to_timestamp().date()
+ iteration_date = get_index_date(flow_date)
+ # iteration_date = flow_date.date()#flow_date.to_timestamp().date()
  last_day_window = get_last_day_of_window(iteration_date, EWR_info['end_month'])
  event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
  if is_date_in_window(iteration_date, last_day_window, event_size):
@@ -2617,7 +2628,8 @@ def create_water_stability_event(flow_date: pd.Timestamp, flows:List, iteration:
  """
  event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
  event_flows = flows[iteration: iteration + event_size]
- start_event_date = flow_date.date()#flow_date.to_timestamp().date()
+ start_event_date = get_index_date(flow_date)
+ # start_event_date = flow_date.date()#flow_date.to_timestamp().date()
  event_dates = [ start_event_date + timedelta(i) for i in range(event_size)]

  return [(d, flow) for d, flow in zip(event_dates, event_flows)]
@@ -3800,6 +3812,7 @@ def nest_calc_percent_trigger(EWR_info:Dict, flows:List, water_years:List, dates
  Returns:
  tuple: final output with the calculation of volume all_events, durations
  """
+ #TODO can we clean up the flow_date and iteration_date parts
  event = []
  total_event = 0
  all_events = construct_event_dict(water_years)
@@ -3807,19 +3820,25 @@ def nest_calc_percent_trigger(EWR_info:Dict, flows:List, water_years:List, dates
  gap_track = 0
  for i, flow in enumerate(flows[:-1]):
  flow_date = dates[i]
+ iteration_date = get_index_date(flow_date)
  flow_percent_change = calc_flow_percent_change(i, flows)
  trigger_day = date(dates[i].year,EWR_info["trigger_month"], EWR_info["trigger_day"])
  cut_date = calc_nest_cut_date(EWR_info, i, dates)
- is_in_trigger_window = dates[i].date() >= trigger_day \
- and dates[i].date() <= trigger_day + timedelta(days=14) #.to_timestamp() .to_timestamp()
+ is_in_trigger_window = iteration_date >= trigger_day \
+ and iteration_date <= trigger_day + timedelta(days=14) #.to_timestamp() .to_timestamp()
+ # is_in_trigger_window = dates[i].date() >= trigger_day \
+ # and dates[i].date() <= trigger_day + timedelta(days=14) #.to_timestamp() .to_timestamp()
  iteration_no_event = 0

  ## if there IS an ongoing event check if we are on the trigger season window
  # if yes then check the current flow
  if total_event > 0:
- if (dates[i].date() >= trigger_day) and (dates[i].date() <= cut_date):
+ if (iteration_date >= trigger_day) and (iteration_date <= cut_date):
  event, all_events, gap_track, total_event, iteration_no_event = nest_flow_check(EWR_info, i, flow, event, all_events,
  gap_track, water_years, total_event, flow_date, flow_percent_change, iteration_no_event) #.to_timestamp() .to_timestamp()
+ # if (dates[i].date() >= trigger_day) and (dates[i].date() <= cut_date):
+ # event, all_events, gap_track, total_event, iteration_no_event = nest_flow_check(EWR_info, i, flow, event, all_events,
+ # gap_track, water_years, total_event, flow_date, flow_percent_change, iteration_no_event) #.to_timestamp() .to_timestamp()

  # this path will only be executed if an event extends beyond the cut date
  else:
@@ -3840,18 +3859,23 @@ def nest_calc_percent_trigger(EWR_info:Dict, flows:List, water_years:List, dates

  # Check final iteration in the flow timeseries, saving any ongoing events/event gaps to their spots in the dictionaries:
  # reset all variable to last flow
- flow_date = dates[-1].date()#.to_timestamp()
+
+ # flow_date = dates[-1].date()#.to_timestamp()
+ flow_date = dates[-1]
+ iteration_date = get_index_date(dates[-1])
  flow_percent_change = calc_flow_percent_change(-1, flows)
  trigger_day = date(dates[-1].year,EWR_info["trigger_month"], EWR_info["trigger_day"])
  cut_date = calc_nest_cut_date(EWR_info, -1, dates)
- is_in_trigger_window = dates[-1].date() >= trigger_day - timedelta(days=7) \
- and dates[-1].date() <= trigger_day + timedelta(days=7) #.to_timestamp() .to_timestamp()
+ is_in_trigger_window = iteration_date >= trigger_day - timedelta(days=7) \
+ and iteration_date <= trigger_day + timedelta(days=7) #.to_timestamp() .to_timestamp()
+ # is_in_trigger_window = dates[-1].date() >= trigger_day - timedelta(days=7) \
+ # and dates[-1].date() <= trigger_day + timedelta(days=7) #.to_timestamp() .to_timestamp()
  iteration_no_event = 0

  if total_event > 0:

- if (flow_date >= trigger_day ) \
- and (flow_date <= cut_date):
+ if (iteration_date >= trigger_day ) \
+ and (iteration_date <= cut_date): # Was flow_date instead of iteration date in both instances
  event, all_events, gap_track, total_event, iteration_no_event = nest_flow_check(EWR_info, -1, flows[-1], event, all_events,
  gap_track, water_years, total_event, flow_date, flow_percent_change, iteration_no_event)

@@ -4835,7 +4859,7 @@ def event_stats(df:pd.DataFrame, PU_df:pd.DataFrame, gauge:str, EWR:str, EWR_inf
  years_with_events = get_event_years_volume_achieved(events, unique_water_years)

  YWE = pd.Series(name = str(EWR + '_eventYears'), data = years_with_events, index = unique_water_years)
- PU_df = pd.concat([PU_df, YWE], axis = 1)
+ # PU_df = pd.concat([PU_df, YWE], axis = 1)
  # Number of event achievements:
  num_event_achievements = get_achievements(EWR_info, events, unique_water_years, durations)

@@ -4843,64 +4867,84 @@ def event_stats(df:pd.DataFrame, PU_df:pd.DataFrame, gauge:str, EWR:str, EWR_inf
  num_event_achievements = get_achievements_connecting_events(events, unique_water_years)

  NEA = pd.Series(name = str(EWR + '_numAchieved'), data= num_event_achievements, index = unique_water_years)
- PU_df = pd.concat([PU_df, NEA], axis = 1)
+ # PU_df = pd.concat([PU_df, NEA], axis = 1)
  # Total number of events THIS ONE IS ONLY ACHIEVED due to Filter Applied
  num_events = get_number_events(EWR_info, events, unique_water_years, durations)
  NE = pd.Series(name = str(EWR + '_numEvents'), data= num_events, index = unique_water_years)
- PU_df = pd.concat([PU_df, NE], axis = 1)
+ # PU_df = pd.concat([PU_df, NE], axis = 1)
  # Total number of events THIS ONE IS ALL EVENTS
  num_events_all = get_all_events(events)
  NEALL = pd.Series(name = str(EWR + '_numEventsAll'), data= num_events_all, index = unique_water_years)
- PU_df = pd.concat([PU_df, NEALL], axis = 1)
+ # PU_df = pd.concat([PU_df, NEALL], axis = 1)
  # Max inter event period
  max_inter_period = get_max_inter_event_days(no_events, unique_water_years)
  MIP = pd.Series(name = str(EWR + '_maxInterEventDays'), data= max_inter_period, index = unique_water_years)
- PU_df = pd.concat([PU_df, MIP], axis = 1)
+ # PU_df = pd.concat([PU_df, MIP], axis = 1)
  # Max inter event period achieved
  max_inter_period_achieved = get_event_max_inter_event_achieved(EWR_info, no_events, unique_water_years)
  MIPA = pd.Series(name = str(EWR + '_maxInterEventDaysAchieved'), data= max_inter_period_achieved, index = unique_water_years)
- PU_df = pd.concat([PU_df, MIPA], axis = 1)
+ # PU_df = pd.concat([PU_df, MIPA], axis = 1)
  # Average length of events
  av_length = get_average_event_length(events, unique_water_years)
  AL = pd.Series(name = str(EWR + '_eventLength'), data = av_length, index = unique_water_years)
- PU_df = pd.concat([PU_df, AL], axis = 1)
+ # PU_df = pd.concat([PU_df, AL], axis = 1)
  # Average length of events ONLY the ACHIEVED
  av_length_achieved = get_average_event_length_achieved(EWR_info, events)
  ALA = pd.Series(name = str(EWR + '_eventLengthAchieved' ), data = av_length_achieved, index = unique_water_years)
- PU_df = pd.concat([PU_df, ALA], axis = 1)
+ # PU_df = pd.concat([PU_df, ALA], axis = 1)
  # Total event days
  total_days = get_total_days(events, unique_water_years)
- TD = pd.Series(name = str(EWR + '_totalEventDays'), data = total_days, index = unique_water_years)
- PU_df = pd.concat([PU_df, TD], axis = 1)
+ TD_A = pd.Series(name = str(EWR + '_totalEventDays'), data = total_days, index = unique_water_years)
+ # PU_df = pd.concat([PU_df, TD], axis = 1)
  # Total event days ACHIEVED
  total_days_achieved = get_achieved_event_days(EWR_info, events)
  TDA = pd.Series(name = str(EWR + '_totalEventDaysAchieved'), data = total_days_achieved, index = unique_water_years)
- PU_df = pd.concat([PU_df, TDA], axis = 1)
+ # PU_df = pd.concat([PU_df, TDA], axis = 1)
  # Max event days
  max_days = get_max_event_days(events, unique_water_years)
  MD = pd.Series(name = str(EWR + '_maxEventDays'), data = max_days, index = unique_water_years)
- PU_df = pd.concat([PU_df, MD], axis = 1)
+ # PU_df = pd.concat([PU_df, MD], axis = 1)
  # Max rolling consecutive event days
  try:
  max_consecutive_days = get_max_consecutive_event_days(events, unique_water_years)
  MR = pd.Series(name = str(EWR + '_maxRollingEvents'), data = max_consecutive_days, index = unique_water_years)
- PU_df = pd.concat([PU_df, MR], axis = 1)
+ # PU_df = pd.concat([PU_df, MR], axis = 1)
  except Exception as e:
  max_consecutive_days = [0]*len(unique_water_years)
  MR = pd.Series(name = str(EWR + '_maxRollingEvents'), data = max_consecutive_days, index = unique_water_years)
- PU_df = pd.concat([PU_df, MR], axis = 1)
+ # PU_df = pd.concat([PU_df, MR], axis = 1)
  log.error(e)
  # Max rolling duration achieved
  achieved_max_rolling_duration = get_max_rolling_duration_achievement(durations, max_consecutive_days)
  MRA = pd.Series(name = str(EWR + '_maxRollingAchievement'), data = achieved_max_rolling_duration, index = unique_water_years)
- PU_df = pd.concat([PU_df, MRA], axis = 1)
+ # PU_df = pd.concat([PU_df, MRA], axis = 1)
  # Append information around available and missing data:
  yearly_gap = get_data_gap(df, water_years, gauge)
  total_days = get_total_series_days(water_years)
  YG = pd.Series(name = str(EWR + '_missingDays'), data = yearly_gap, index = unique_water_years)
- TD = pd.Series(name = str(EWR + '_totalPossibleDays'), data = total_days, index = unique_water_years)
- PU_df = pd.concat([PU_df, YG], axis = 1)
- PU_df = pd.concat([PU_df, TD], axis = 1)
+ TD_B = pd.Series(name = str(EWR + '_totalPossibleDays'), data = total_days, index = unique_water_years)
+ # PU_df = pd.concat([PU_df, YG], axis = 1)
+ # PU_df = pd.concat([PU_df, TD], axis = 1)
+ PU_df = pd.concat(
+ [PU_df,
+ YWE,
+ NEA,
+ NE,
+ NEALL,
+ MIP,
+ MIPA,
+ AL,
+ ALA,
+ TD_A,
+ TDA,
+ MD,
+ MR,
+ MRA,
+ YG,
+ TD_B
+ ],
+ axis=1
+ )

  return PU_df
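The change above replaces the per-statistic `pd.concat` calls with a single concatenation of all the yearly Series onto PU_df. A minimal sketch of that pattern, with made-up statistic names and values:

```python
# Sketch of the batched-concat pattern used in event_stats (illustrative data only).
import pandas as pd

water_years = [2018, 2019, 2020]
PU_df = pd.DataFrame(index=water_years)

stats = {
    "EWR1_eventYears": [1, 0, 1],
    "EWR1_numEvents": [2, 0, 1],
    "EWR1_totalEventDays": [14, 0, 9],
}
series_list = [pd.Series(name=name, data=values, index=water_years)
               for name, values in stats.items()]

# One concat keeps the column order explicit and avoids repeatedly copying PU_df,
# which is what the former one-column-at-a-time concats did.
PU_df = pd.concat([PU_df] + series_list, axis=1)
print(PU_df)
```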