py-ewr 2.2.1__tar.gz → 2.2.4__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (29) hide show
  1. {py_ewr-2.2.1 → py_ewr-2.2.4}/PKG-INFO +4 -3
  2. {py_ewr-2.2.1 → py_ewr-2.2.4}/README.md +3 -1
  3. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/data_inputs.py +24 -0
  4. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/evaluate_EWRs.py +14 -17
  5. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/observed_handling.py +2 -3
  6. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/scenario_handling.py +51 -10
  7. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr.egg-info/PKG-INFO +4 -3
  8. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr.egg-info/requires.txt +0 -1
  9. {py_ewr-2.2.1 → py_ewr-2.2.4}/setup.py +1 -2
  10. {py_ewr-2.2.1 → py_ewr-2.2.4}/tests/test_evaluate_ewr_rest.py +43 -43
  11. {py_ewr-2.2.1 → py_ewr-2.2.4}/tests/test_evaluate_ewrs.py +22 -22
  12. {py_ewr-2.2.1 → py_ewr-2.2.4}/tests/test_observed_handling.py +1 -1
  13. {py_ewr-2.2.1 → py_ewr-2.2.4}/tests/test_scenario_handling.py +33 -1
  14. {py_ewr-2.2.1 → py_ewr-2.2.4}/LICENSE +0 -0
  15. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/__init__.py +0 -0
  16. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/io.py +0 -0
  17. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/model_metadata/SiteID_MDBA.csv +0 -0
  18. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/model_metadata/SiteID_NSW.csv +0 -0
  19. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/model_metadata/iqqm_stations.csv +0 -0
  20. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/parameter_metadata/ewr_calc_config.json +0 -0
  21. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/parameter_metadata/parameter_sheet.csv +0 -0
  22. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr/summarise_results.py +0 -0
  23. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr.egg-info/SOURCES.txt +0 -0
  24. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr.egg-info/dependency_links.txt +0 -0
  25. {py_ewr-2.2.1 → py_ewr-2.2.4}/py_ewr.egg-info/top_level.txt +0 -0
  26. {py_ewr-2.2.1 → py_ewr-2.2.4}/pyproject.toml +0 -0
  27. {py_ewr-2.2.1 → py_ewr-2.2.4}/setup.cfg +0 -0
  28. {py_ewr-2.2.1 → py_ewr-2.2.4}/tests/test_data_inputs.py +0 -0
  29. {py_ewr-2.2.1 → py_ewr-2.2.4}/tests/test_summarise_results.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: py_ewr
3
- Version: 2.2.1
3
+ Version: 2.2.4
4
4
  Summary: Environmental Water Requirement calculator
5
5
  Home-page: https://github.com/MDBAuth/EWR_tool
6
6
  Author: Martin Job
@@ -23,7 +23,6 @@ Requires-Dist: ipython==8.8.0
23
23
  Requires-Dist: ipywidgets==7.7.0
24
24
  Requires-Dist: pandas==2.0.3
25
25
  Requires-Dist: requests==2.25.1
26
- Requires-Dist: tqdm>=4.66.1
27
26
  Requires-Dist: mdba-gauge-getter==0.5.1
28
27
  Requires-Dist: cachetools==5.2.0
29
28
  Requires-Dist: xarray==2023.01.0
@@ -35,9 +34,11 @@ Requires-Dist: numpy<2
35
34
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
36
35
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)
37
36
 
38
- ### **EWR tool version 2.2.1 README**
37
+ ### **EWR tool version 2.2.4 README**
39
38
 
40
39
  ### **Notes on recent version update**
40
+ - Remove TQDM loading bars
41
+ - Handle duplicate sites in MDBA siteID file - where a duplicate exists, the first match is used and the rest are skipped over
41
42
  - Adding new model format handling - 'IQQM - netcdf'
42
43
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
43
44
  - ten thousand year handling - This has been briefly taken offline for this version.
@@ -3,9 +3,11 @@
3
3
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
4
4
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)
5
5
 
6
- ### **EWR tool version 2.2.1 README**
6
+ ### **EWR tool version 2.2.4 README**
7
7
 
8
8
  ### **Notes on recent version update**
9
+ - Remove TQDM loading bars
10
+ - Handle duplicate sites in MDBA siteID file - where a duplicate exists, the first match is used and the rest are skipped over
9
11
  - Adding new model format handling - 'IQQM - netcdf'
10
12
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
11
13
  - ten thousand year handling - This has been briefly taken offline for this version.
@@ -416,3 +416,27 @@ def get_scenario_gauges(gauge_results: dict) -> list:
416
416
  for gauge in scenario.keys():
417
417
  scenario_gauges.append(gauge)
418
418
  return list(set(scenario_gauges))
419
+
420
+
421
+ def gauge_groups(parameter_sheet: pd.DataFrame) -> dict:
422
+ '''
423
+ Returns a dictionary of flow, level, and lake level gauges based on the parameter sheet and some hard coding of other EWRs
424
+
425
+ Args:
426
+ parameter_sheet (pd.DataFrame): input parameter sheet
427
+
428
+ Returns:
429
+ dict: keys as flow, level, and lake level gauges, values as the list of gauges
430
+ '''
431
+
432
+ # Hard coded gauges for the CLLMM EWRs
433
+ hard_code_levels = ['A4260527', 'A4260524', 'A4260633', 'A4261209', 'A4261165']
434
+ hard_code_lake_levels = ['A4261133', 'A4260574', 'A4260575']
435
+
436
+ flow_gauges = set(parameter_sheet[parameter_sheet['GaugeType'] == 'F']['Gauge']) + set(parameter_sheet['Multigauge'])
437
+ level_gauges = set(parameter_sheet[parameter_sheet['GaugeType'] == 'L']['Gauge']) + set(parameter_sheet['WeirpoolGauge']) + set(hard_code_levels)
438
+ lake_level_gauges = set(parameter_sheet[parameter_sheet['GaugeType'] == 'LL']['Gauge'])+set(hard_code_lake_levels)
439
+
440
+ return flow_gauges, level_gauges, lake_level_gauges
441
+
442
+ # def gauges_to_measurand()
@@ -10,7 +10,6 @@ import logging
10
10
 
11
11
  import pandas as pd
12
12
  import numpy as np
13
- from tqdm import tqdm
14
13
 
15
14
  from . import data_inputs
16
15
 
@@ -446,8 +445,8 @@ def get_index_date(date_index:Any)-> datetime.date:
446
445
  """
447
446
  if type(date_index) == pd._libs.tslibs.timestamps.Timestamp:
448
447
  return date_index.date()
449
- if type(date_index) == pd._libs.tslibs.period.Period:
450
- return date_index.to_timestamp().date()
448
+ # if type(date_index) == pd._libs.tslibs.period.Period:
449
+ # return date_index.date()#.to_timestamp()
451
450
  else:
452
451
  return date_index
453
452
 
@@ -1959,7 +1958,7 @@ def water_stability_check(EWR_info:Dict, iteration:int, flows:List, all_events:D
1959
1958
  if levels_are_stable:
1960
1959
  # record event opportunity for the next n days for the total period of (EggDaysSpell)+ larvae (LarvaeDaysSpell)
1961
1960
  # if the last day of the event is not over the last day of the event window
1962
- iteration_date = flow_date.to_timestamp().date()
1961
+ iteration_date = flow_date.date()#flow_date.to_timestamp().date()
1963
1962
  last_day_window = get_last_day_of_window(iteration_date, EWR_info['end_month'])
1964
1963
  event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
1965
1964
  if is_date_in_window(iteration_date, last_day_window, event_size):
@@ -1996,7 +1995,7 @@ def water_stability_level_check(EWR_info:Dict, iteration:int, all_events:Dict, w
1996
1995
  if levels_are_stable:
1997
1996
  # record event opportunity for the next n days for the total period of (EggDaysSpell)+ larvae (LarvaeDaysSpell)
1998
1997
  # if the last day of the event is not over the last day of the event window
1999
- iteration_date = flow_date.to_timestamp().date()
1998
+ iteration_date = flow_date.date()#flow_date.to_timestamp().date()
2000
1999
  last_day_window = get_last_day_of_window(iteration_date, EWR_info['end_month'])
2001
2000
  event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
2002
2001
  if is_date_in_window(iteration_date, last_day_window, event_size):
@@ -2605,7 +2604,7 @@ def lower_lakes_level_check(EWR_info: dict, levels: pd.Series, event: list, all_
2605
2604
  #------------------------------------ Calculation functions --------------------------------------#
2606
2605
 
2607
2606
 
2608
- def create_water_stability_event(flow_date: pd.Period, flows:List, iteration: int, EWR_info:dict)->List:
2607
+ def create_water_stability_event(flow_date: pd.Timestamp, flows:List, iteration: int, EWR_info:dict)->List:#pd.Period
2609
2608
  """create overlapping event that meets an achievement for fish recruitment water stability
2610
2609
 
2611
2610
  Args:
@@ -2618,7 +2617,7 @@ def create_water_stability_event(flow_date: pd.Period, flows:List, iteration: in
2618
2617
  """
2619
2618
  event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
2620
2619
  event_flows = flows[iteration: iteration + event_size]
2621
- start_event_date = flow_date.to_timestamp().date()
2620
+ start_event_date = flow_date.date()#flow_date.to_timestamp().date()
2622
2621
  event_dates = [ start_event_date + timedelta(i) for i in range(event_size)]
2623
2622
 
2624
2623
  return [(d, flow) for d, flow in zip(event_dates, event_flows)]
@@ -3811,16 +3810,16 @@ def nest_calc_percent_trigger(EWR_info:Dict, flows:List, water_years:List, dates
3811
3810
  flow_percent_change = calc_flow_percent_change(i, flows)
3812
3811
  trigger_day = date(dates[i].year,EWR_info["trigger_month"], EWR_info["trigger_day"])
3813
3812
  cut_date = calc_nest_cut_date(EWR_info, i, dates)
3814
- is_in_trigger_window = dates[i].to_timestamp().date() >= trigger_day \
3815
- and dates[i].to_timestamp().date() <= trigger_day + timedelta(days=14)
3813
+ is_in_trigger_window = dates[i].date() >= trigger_day \
3814
+ and dates[i].date() <= trigger_day + timedelta(days=14) #.to_timestamp() .to_timestamp()
3816
3815
  iteration_no_event = 0
3817
3816
 
3818
3817
  ## if there IS an ongoing event check if we are on the trigger season window
3819
3818
  # if yes then check the current flow
3820
3819
  if total_event > 0:
3821
- if (dates[i].to_timestamp().date() >= trigger_day) and (dates[i].to_timestamp().date() <= cut_date):
3820
+ if (dates[i].date() >= trigger_day) and (dates[i].date() <= cut_date):
3822
3821
  event, all_events, gap_track, total_event, iteration_no_event = nest_flow_check(EWR_info, i, flow, event, all_events,
3823
- gap_track, water_years, total_event, flow_date, flow_percent_change, iteration_no_event)
3822
+ gap_track, water_years, total_event, flow_date, flow_percent_change, iteration_no_event) #.to_timestamp() .to_timestamp()
3824
3823
 
3825
3824
  # this path will only be executed if an event extends beyond the cut date
3826
3825
  else:
@@ -3841,12 +3840,12 @@ def nest_calc_percent_trigger(EWR_info:Dict, flows:List, water_years:List, dates
3841
3840
 
3842
3841
  # Check final iteration in the flow timeseries, saving any ongoing events/event gaps to their spots in the dictionaries:
3843
3842
  # reset all variable to last flow
3844
- flow_date = dates[-1].to_timestamp().date()
3843
+ flow_date = dates[-1].date()#.to_timestamp()
3845
3844
  flow_percent_change = calc_flow_percent_change(-1, flows)
3846
3845
  trigger_day = date(dates[-1].year,EWR_info["trigger_month"], EWR_info["trigger_day"])
3847
3846
  cut_date = calc_nest_cut_date(EWR_info, -1, dates)
3848
- is_in_trigger_window = dates[-1].to_timestamp().date() >= trigger_day - timedelta(days=7) \
3849
- and dates[-1].to_timestamp().date() <= trigger_day + timedelta(days=7)
3847
+ is_in_trigger_window = dates[-1].date() >= trigger_day - timedelta(days=7) \
3848
+ and dates[-1].date() <= trigger_day + timedelta(days=7) #.to_timestamp() .to_timestamp()
3850
3849
  iteration_no_event = 0
3851
3850
 
3852
3851
  if total_event > 0:
@@ -5086,9 +5085,7 @@ def calc_sorter(df_F:pd.DataFrame, df_L:pd.DataFrame, gauge:str, EWR_table:pd.Da
5086
5085
  EWR_codes = PU_table['Code']
5087
5086
  PU_df = pd.DataFrame()
5088
5087
  PU_events = {}
5089
- for i, EWR in enumerate(tqdm(EWR_codes, position = 0, leave = False,
5090
- bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}',
5091
- desc= str('Evaluating ewrs for '+ gauge))):
5088
+ for i, EWR in enumerate(EWR_codes):
5092
5089
  events = {}
5093
5090
 
5094
5091
  MULTIGAUGE = is_multigauge(EWR_table, gauge, EWR, PU)
@@ -3,7 +3,6 @@ from typing import Dict, List
3
3
  import logging
4
4
 
5
5
  import pandas as pd
6
- from tqdm import tqdm
7
6
  import numpy as np
8
7
 
9
8
  from . import data_inputs, evaluate_EWRs, summarise_results, scenario_handling
@@ -131,7 +130,7 @@ def observed_cleaner(input_df: pd.DataFrame, dates: dict) -> pd.DataFrame:
131
130
  start_date = datetime(dates['start_date'].year, dates['start_date'].month, dates['start_date'].day)
132
131
  end_date = datetime(dates['end_date'].year, dates['end_date'].month, dates['end_date'].day)
133
132
 
134
- df_index = pd.date_range(start=start_date,end=end_date - timedelta(days=1)).to_period()
133
+ df_index = pd.date_range(start=start_date,end=end_date - timedelta(days=1))#.to_period()
135
134
  gauge_data_df = pd.DataFrame()
136
135
  gauge_data_df['Date'] = df_index
137
136
  gauge_data_df = gauge_data_df.set_index('Date')
@@ -140,7 +139,7 @@ def observed_cleaner(input_df: pd.DataFrame, dates: dict) -> pd.DataFrame:
140
139
 
141
140
 
142
141
  input_df['Date'] = pd.to_datetime(input_df['DATETIME'], format = '%Y-%m-%d')
143
- input_df['Date'] = input_df['Date'].apply(lambda x: x.to_period(freq='D'))
142
+ # input_df['Date'] = input_df['Date'].apply(lambda x: x.to_period(freq='D'))
144
143
 
145
144
  # Check with states for more codes:
146
145
  bad_data_codes = data_inputs.get_bad_QA_codes()
@@ -7,7 +7,6 @@ from datetime import datetime, date
7
7
  import logging
8
8
 
9
9
  import pandas as pd
10
- from tqdm import tqdm
11
10
  import xarray as xr
12
11
  import netCDF4
13
12
 
@@ -259,7 +258,7 @@ def cleaner_MDBA(input_df: pd.DataFrame) -> pd.DataFrame:
259
258
 
260
259
  cleaned_df = input_df.rename(columns={'Mn': 'Month', 'Dy': 'Day'})
261
260
  cleaned_df['Date'] = pd.to_datetime(cleaned_df[['Year', 'Month', 'Day']], format = '%Y-%m-%d')
262
- cleaned_df['Date'] = cleaned_df['Date'].apply(lambda x: x.to_period(freq='D'))
261
+ # cleaned_df['Date'] = cleaned_df['Date'].apply(lambda x: x.to_period(freq='D'))
263
262
  cleaned_df = cleaned_df.drop(['Day', 'Month', 'Year'], axis = 1)
264
263
  cleaned_df = cleaned_df.set_index('Date')
265
264
 
@@ -437,6 +436,42 @@ def extract_gauge_from_string(input_string: str) -> str:
437
436
  gauge = input_string.split('_')[0]
438
437
  return gauge
439
438
 
439
+ # def match_MDBA_nodes_dev(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_table_path: str) -> tuple:
440
+ # '''
441
+ # Iterate over the gauges in the parameter sheet,
442
+ # find all the occurences of that gauge in the ARWC column in the model metadata file,
443
+ # for each match, search for the matching siteID in the model file,
444
+ # append the column to the flow dataframe.
445
+
446
+ # Args:
447
+ # input_df (pd.DataFrame): flow/water level dataframe
448
+ # model_metadata (pd.DataFrame): dataframe linking model nodes to gauges
449
+
450
+ # Returns:
451
+ # tuple[pd.DataFrame, pd.DataFrame]: flow dataframe, water level dataframe
452
+
453
+ # '''
454
+ # df_flow = pd.DataFrame(index = input_df.index)
455
+ # df_level = pd.DataFrame(index = input_df.index)
456
+
457
+ # unique_gauges = #Get unique gauges from the parameter sheet
458
+ # #TODO: include logic to have the measurand included
459
+ # for i in unique_gauges:
460
+ # # Subset of the SiteID file with the gauges
461
+ # subset_df = model_metadata[model_metadata['AWRC'] == i]
462
+ # # Iterate over the occurences of the gauge and check if the matching SiteID file is in the model file
463
+ # for j in subset_df.iterrows:
464
+ # site_mm = j['SITEID']
465
+ # if site_mm in input_df.columns:
466
+ # df_flow[i] = input_df[site_mm+INPUT_MEASURAND+ANY_QUALITY_CODE]
467
+ # or
468
+ # df_level[i] = input_df[site_mm+INPUT_MEASURAND+ANY_QUALITY_CODE]
469
+
470
+ # if df_flow.empty and df_level.empty:
471
+ # raise ValueError('No relevant gauges and or measurands found in dataset, the EWR tool cannot evaluate this model output file')
472
+ # return df_flow, df_level
473
+
474
+
440
475
  def match_MDBA_nodes(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_table_path: str) -> tuple:
441
476
  '''Checks if the source file columns have EWRs available, returns a flow and level dataframe with only
442
477
  the columns with EWRs available. Renames columns to gauges
@@ -461,11 +496,19 @@ def match_MDBA_nodes(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_t
461
496
  measure = col_clean.split('-')[1]
462
497
  if ((measure in measurands) and (model_metadata['SITEID'] == site).any()):
463
498
  subset = model_metadata.query("SITEID==@site")
464
- gauge = subset["AWRC"].iloc[0]
465
- if gauge in flow_gauges and measure == '1':
466
- df_flow[gauge] = input_df[col]
467
- if gauge in level_gauges and measure == '35':
468
- df_level[gauge] = input_df[col]
499
+ for iset in range(len(subset)):
500
+ gauge = subset["AWRC"].iloc[iset]
501
+ if gauge in flow_gauges and measure == '1':
502
+ df_flow[gauge] = input_df[col]
503
+ if gauge in level_gauges and measure == '35':
504
+ aa=input_df[[col]]
505
+ if (len(aa.columns)>1):
506
+ print('More than one site has been identified, the first site is used')
507
+ print('Site info: ', col)
508
+ df_level[gauge] = aa.iloc[:,0]
509
+ else:
510
+ df_level[gauge] = input_df[col]
511
+
469
512
  if df_flow.empty:
470
513
  raise ValueError('No relevant gauges and or measurands found in dataset, the EWR tool cannot evaluate this model output file')
471
514
  return df_flow, df_level
@@ -538,9 +581,7 @@ class ScenarioHandler:
538
581
  # Analyse all scenarios for EWRs
539
582
  detailed_results = {}
540
583
  detailed_events = {}
541
- for scenario in tqdm(scenarios, position = 0, leave = True,
542
- bar_format='{l_bar}{bar:10}{r_bar}{bar:-10b}',
543
- desc= 'Evaluating scenarios'):
584
+ for scenario in scenarios:
544
585
  if self.model_format == 'Bigmod - MDBA':
545
586
 
546
587
  data, header = unpack_model_file(scenarios[scenario], 'Dy', 'Field')
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: py_ewr
3
- Version: 2.2.1
3
+ Version: 2.2.4
4
4
  Summary: Environmental Water Requirement calculator
5
5
  Home-page: https://github.com/MDBAuth/EWR_tool
6
6
  Author: Martin Job
@@ -23,7 +23,6 @@ Requires-Dist: ipython==8.8.0
23
23
  Requires-Dist: ipywidgets==7.7.0
24
24
  Requires-Dist: pandas==2.0.3
25
25
  Requires-Dist: requests==2.25.1
26
- Requires-Dist: tqdm>=4.66.1
27
26
  Requires-Dist: mdba-gauge-getter==0.5.1
28
27
  Requires-Dist: cachetools==5.2.0
29
28
  Requires-Dist: xarray==2023.01.0
@@ -35,9 +34,11 @@ Requires-Dist: numpy<2
35
34
  [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
36
35
  [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)
37
36
 
38
- ### **EWR tool version 2.2.1 README**
37
+ ### **EWR tool version 2.2.4 README**
39
38
 
40
39
  ### **Notes on recent version update**
40
+ - Remove TQDM loading bars
41
+ - Handle duplicate sites in MDBA siteID file - where a duplicate exists, the first match is used and the rest are skipped over
41
42
  - Adding new model format handling - 'IQQM - netcdf'
42
43
  - Standard time-series handling added - each column needs a gauge, followed by an underscore, followed by either flow or level (e.g. 409025_flow). This handling also has missing date filling - so any missing dates will be filled with NaN values in all columns.
43
44
  - ten thousand year handling - This has been briefly taken offline for this version.
@@ -2,7 +2,6 @@ ipython==8.8.0
2
2
  ipywidgets==7.7.0
3
3
  pandas==2.0.3
4
4
  requests==2.25.1
5
- tqdm>=4.66.1
6
5
  mdba-gauge-getter==0.5.1
7
6
  cachetools==5.2.0
8
7
  xarray==2023.01.0
@@ -6,7 +6,7 @@ with open("README.md", "r", encoding="utf-8") as fh:
6
6
 
7
7
  setup(
8
8
  name="py_ewr",
9
- version="2.2.1",
9
+ version="2.2.4",
10
10
  author="Martin Job",
11
11
  author_email="Martin.Job@mdba.gov.au",
12
12
  description="Environmental Water Requirement calculator",
@@ -35,7 +35,6 @@ setup(
35
35
  "ipywidgets==7.7.0",
36
36
  "pandas==2.0.3",
37
37
  "requests==2.25.1",
38
- "tqdm>=4.66.1",
39
38
  "mdba-gauge-getter==0.5.1",
40
39
  "cachetools==5.2.0",
41
40
  "xarray==2023.01.0",
@@ -520,8 +520,8 @@ def test_flow_calc(flows,expected_all_events,expected_all_no_events):
520
520
  # Set up input data
521
521
  EWR_info = {'min_flow': 5, 'max_flow': 20, 'gap_tolerance': 0, 'min_event':10, 'duration': 10}
522
522
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
523
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
524
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
523
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
524
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
525
525
  # Set up expected output data
526
526
  expected_durations = [10]*4
527
527
  # Send inputs to test function and test
@@ -542,8 +542,8 @@ def test_lowflow_calc():
542
542
  EWR_info = {'min_flow': 10, 'max_flow': 20, 'min_event':1, 'duration': 300, 'duration_VD': 10}
543
543
  flows = np.array([5]*295+[0]*25+[10]*45 + [0]*355+[5000]*10 + [0]*355+[10]*10 + [5]*295+[0]*25+[10]*45+[10]*1)
544
544
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
545
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
546
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
545
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
546
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
547
547
  # Set up expected output data
548
548
  expected_all_events = {2012: [[(date(2013, 5, 17), 10), (date(2013, 5, 18), 10), (date(2013, 5, 19), 10),
549
549
  (date(2013, 5, 20), 10), (date(2013, 5, 21), 10), (date(2013, 5, 22), 10), (date(2013, 5, 23), 10),
@@ -590,8 +590,8 @@ def test_lowflow_calc():
590
590
  'duration_VD': 5, 'start_month': 7, 'end_month': 12, 'start_day': None, 'end_day': None}
591
591
  flows = np.array([10]*5+[0]*35+[5]*5+[0]*295+[0]*25 + [0]*355+[5]*10 + [10]*10+[0]*355 + [5]*295+[0]*25+[10]*45+[10]*1)
592
592
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
593
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
594
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
593
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
594
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
595
595
  masked_dates = masked_dates[((masked_dates.month >= 7) & (masked_dates.month <= 12))] # Just want the dates in the date range
596
596
  # Set up expected output data
597
597
  expected_all_events = {2012: [[(date(2012, 7, 1), 10), (date(2012, 7, 2), 10), (date(2012, 7, 3), 10),
@@ -618,8 +618,8 @@ def test_ctf_calc():
618
618
  EWR_info = {'min_flow': 0, 'max_flow': 1, 'min_event':5, 'duration': 20, 'duration_VD': 10}
619
619
  flows = np.array([5]*295+[0]*25+[10]*45 + [20]*355+[5000]*5+[0]*5 + [0]*355+[10]*10 + [1]*295+[20]*25+[0]*45+[0]*1)
620
620
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
621
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
622
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
621
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
622
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
623
623
  # Set up expected output data
624
624
  expected_all_events = {2012: [[(date(2013, 4, 22)+timedelta(days=i), 0) for i in range(25)]],
625
625
  2013: [[(date(2014, 6, 26)+timedelta(days=i), 0) for i in range(5)]],
@@ -643,8 +643,8 @@ def test_ctf_calc():
643
643
  [10]*10+[0]*355 +
644
644
  [5]*295+[0]*25+[10]*45+[10]*1)
645
645
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
646
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
647
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
646
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
647
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
648
648
  masked_dates = masked_dates[((masked_dates.month >= 7) & (masked_dates.month <= 12))] # Just want the dates in the date range
649
649
  # Set up expected output data
650
650
  expected_all_events = {2012: [[(date(2012, 7, 1)+timedelta(days=i), 10) for i in range(5)],
@@ -738,7 +738,7 @@ def test_ctf_calc_anytime(flows, expected_all_events, expected_all_no_events):
738
738
  # Set up input data
739
739
  EWR_info = {'min_flow': 0, 'max_flow': 1, 'min_event':5, 'duration': 20, 'duration_VD': 10}
740
740
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
741
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
741
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
742
742
  # Send to test function and then test
743
743
  all_events, durations = evaluate_EWRs.ctf_calc_anytime(EWR_info, flows, water_years, dates)
744
744
  for year in all_events:
@@ -799,7 +799,7 @@ def test_flow_calc_anytime(flows, expected_all_events, expected_all_no_events):
799
799
  expected_durations = [10]*4
800
800
  # Send to test function and then test
801
801
  dates = 1
802
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
802
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
803
803
  all_events, durations = evaluate_EWRs.flow_calc_anytime(EWR_info, flows, water_years, dates)
804
804
 
805
805
  for year in all_events:
@@ -808,8 +808,8 @@ def test_flow_calc_anytime(flows, expected_all_events, expected_all_no_events):
808
808
  assert durations == expected_durations
809
809
 
810
810
 
811
- def test_get_index_date(period_date, stamp_date):
812
- assert evaluate_EWRs.get_index_date(period_date) == evaluate_EWRs.get_index_date(stamp_date)
811
+ def test_get_index_date(datetime_date, stamp_date):#period_date
812
+ assert evaluate_EWRs.get_index_date(datetime_date) == evaluate_EWRs.get_index_date(stamp_date)
813
813
 
814
814
 
815
815
  @pytest.mark.parametrize("EWR_info,flows,expected_all_events,expected_all_no_events",[
@@ -870,7 +870,7 @@ def test_get_index_date(period_date, stamp_date):
870
870
  def test_cumulative_calc(EWR_info, flows, expected_all_events, expected_all_no_events):
871
871
  dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))
872
872
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
873
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
873
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
874
874
  all_events, durations = evaluate_EWRs.cumulative_calc(EWR_info, flows, water_years, dates, masked_dates)
875
875
 
876
876
  assert all_events == expected_all_events
@@ -932,7 +932,7 @@ def test_cumulative_calc_qld(EWR_info, flows, expected_all_events):
932
932
  """
933
933
  dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))
934
934
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
935
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
935
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
936
936
  all_events, _ = evaluate_EWRs.cumulative_calc_qld(EWR_info, flows, water_years, dates, masked_dates)
937
937
 
938
938
  assert all_events == expected_all_events
@@ -2112,7 +2112,7 @@ def test_nest_calc_percent_trigger(EWR_info, flows, expected_all_events, expecte
2112
2112
  """
2113
2113
  # non changing parameters
2114
2114
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
2115
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
2115
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
2116
2116
 
2117
2117
  all_events, _ = evaluate_EWRs.nest_calc_percent_trigger(EWR_info, flows, water_years, dates)
2118
2118
 
@@ -3176,8 +3176,8 @@ def test_coorong_check(EWR_info, iteration, levels_data, event, all_events,
3176
3176
  ])
3177
3177
  def test_coorong_level_calc(EWR_info, levels_data, expected_all_events):
3178
3178
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
3179
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3180
- masked_dates = masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3179
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3180
+ masked_dates = masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3181
3181
  levels = pd.Series(levels_data, index=dates)
3182
3182
 
3183
3183
  all_events, _ = evaluate_EWRs.coorong_level_calc(EWR_info, levels, water_years, dates, masked_dates)
@@ -3258,8 +3258,8 @@ def test_lower_lakes_level_check(EWR_info, iteration, levels_data, event, all_ev
3258
3258
  def test_lower_lakes_level_calc(EWR_info, levels_data, expected_all_events):
3259
3259
 
3260
3260
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
3261
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3262
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3261
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3262
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3263
3263
  levels = pd.Series(levels_data, index=dates)
3264
3264
 
3265
3265
  all_events, _ = evaluate_EWRs.lower_lakes_level_calc(EWR_info, levels, water_years, dates, masked_dates)
@@ -3382,8 +3382,8 @@ def test_flow_calc_sa(EWR_info, flows_data, expected_all_events):
3382
3382
  """
3383
3383
 
3384
3384
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
3385
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3386
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3385
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3386
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3387
3387
  flow_series = pd.Series(flows_data, index=dates)
3388
3388
 
3389
3389
  all_events, _ = evaluate_EWRs.flow_calc_sa(EWR_info, flow_series, water_years, dates, masked_dates)
@@ -3805,8 +3805,8 @@ def test_flow_calc_check_ctf(EWR_info,flows_data,expected_all_events):
3805
3805
  '''
3806
3806
 
3807
3807
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
3808
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3809
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
3808
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3809
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
3810
3810
  flows = pd.Series(flows_data, index=dates)
3811
3811
 
3812
3812
  all_events, _ = evaluate_EWRs.flow_calc_check_ctf(EWR_info, flows, water_years, dates, masked_dates)
@@ -4083,7 +4083,7 @@ def test_cumulative_calc_bbr(EWR_info, flows, levels, expected_all_events, expec
4083
4083
  """
4084
4084
  dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))
4085
4085
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
4086
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4086
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4087
4087
  all_events, _ = evaluate_EWRs.cumulative_calc_bbr(EWR_info, flows, levels, water_years, dates, masked_dates)
4088
4088
 
4089
4089
  assert all_events == expected_all_events
@@ -4117,7 +4117,7 @@ def test_achieved_min_volume(event, EWR_info, expected_result):
4117
4117
 
4118
4118
  @pytest.mark.parametrize("flow_date, flows, iteration, EWR_info, expected_results", [
4119
4119
  (
4120
- pd.Period('2023-05-24', freq='D'),
4120
+ pd.to_datetime('2023-05-24', format='%Y-%m-%d'),#pd.Period('2023-05-24', freq='D'),
4121
4121
  [1,1,3,4,5,6,7,1,1,1],
4122
4122
  6,
4123
4123
  {"larvae_days_spell":1,"eggs_days_spell":2},
@@ -4209,7 +4209,7 @@ def test_check_water_stability_flow(flows, iteration, EWR_info, expected_result)
4209
4209
  ])
4210
4210
  def test_water_stability_check(EWR_info, iteration, flows, all_events, levels, expected_all_events):
4211
4211
 
4212
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4212
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4213
4213
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
4214
4214
  flow_date = dates[iteration]
4215
4215
 
@@ -4370,9 +4370,9 @@ def test_water_stability_calc(EWR_info, flows, levels, expected_all_events):
4370
4370
  5. meeting 2 opportunity but second one the last day is outside window
4371
4371
  """
4372
4372
 
4373
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4373
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4374
4374
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
4375
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4375
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4376
4376
 
4377
4377
  all_events, _ = evaluate_EWRs.water_stability_calc(EWR_info, flows, levels,water_years, dates, masked_dates)
4378
4378
 
@@ -4508,7 +4508,7 @@ def test_check_water_stability_height(levels, iteration, EWR_info, expected_resu
4508
4508
  ])
4509
4509
  def test_water_stability_level_check(EWR_info, iteration, all_events, levels, expected_all_events):
4510
4510
 
4511
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4511
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4512
4512
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
4513
4513
  flow_date = dates[iteration]
4514
4514
 
@@ -4573,9 +4573,9 @@ def test_water_stability_level_calc(EWR_info, levels, expected_all_events):
4573
4573
  2. meeting 2 opportunity but second one the last day is outside window
4574
4574
  """
4575
4575
 
4576
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4576
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4577
4577
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
4578
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4578
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4579
4579
 
4580
4580
  all_events, _ = evaluate_EWRs.water_stability_level_calc(EWR_info, levels, water_years, dates, masked_dates)
4581
4581
 
@@ -4801,9 +4801,9 @@ def test_check_period_flow_change_stepped(flows, EWR_info, iteration, mode, expe
4801
4801
  ])
4802
4802
  def test_rate_rise_flow_calc(EWR_info, flows, expected_all_events):
4803
4803
  # non changing variable
4804
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4804
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4805
4805
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
4806
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4806
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4807
4807
 
4808
4808
  all_events, _ = evaluate_EWRs.rate_rise_flow_calc(EWR_info, flows, water_years, dates, masked_dates)
4809
4809
 
@@ -4960,9 +4960,9 @@ def test_rate_rise_flow_check(EWR_info, iteration, event, all_events, total_even
4960
4960
  ])
4961
4961
  def test_rate_fall_flow_calc(EWR_info, flows, expected_all_events):
4962
4962
  # non changing variable
4963
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4963
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4964
4964
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
4965
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
4965
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
4966
4966
 
4967
4967
  all_events, _ = evaluate_EWRs.rate_fall_flow_calc(EWR_info, flows, water_years, dates, masked_dates)
4968
4968
 
@@ -5118,9 +5118,9 @@ def test_rate_fall_flow_check(EWR_info, iteration, event, all_events, total_even
5118
5118
  def test_rate_rise_level_calc(EWR_info, levels, expected_all_events):
5119
5119
 
5120
5120
  # non changing variable
5121
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
5121
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
5122
5122
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
5123
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
5123
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
5124
5124
 
5125
5125
  all_events, _ = evaluate_EWRs.rate_rise_level_calc(EWR_info, levels, water_years, dates, masked_dates)
5126
5126
 
@@ -5279,9 +5279,9 @@ def test_rate_rise_level_check(EWR_info, iteration, event, all_events, total_eve
5279
5279
  def test_rate_fall_level_calc(EWR_info, levels, expected_all_events):
5280
5280
 
5281
5281
  # non changing variable
5282
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
5282
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
5283
5283
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
5284
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
5284
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
5285
5285
 
5286
5286
  all_events, _ = evaluate_EWRs.rate_fall_level_calc(EWR_info, levels, water_years, dates, masked_dates)
5287
5287
 
@@ -5548,9 +5548,9 @@ def test_level_change_check(EWR_info, iteration, event, all_events, total_event,
5548
5548
  def test_level_change_calc(EWR_info, levels, expected_all_events):
5549
5549
 
5550
5550
  # non changing variable
5551
- dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
5551
+ dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
5552
5552
  water_years = np.array([2012]*365 + [2013]*365 + [2014]*365 + [2015]*366)
5553
- masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()
5553
+ masked_dates = pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))#.to_period()
5554
5554
 
5555
5555
  all_events, _ = evaluate_EWRs.level_change_calc(EWR_info, levels, water_years, dates, masked_dates)
5556
5556
 
@@ -16,7 +16,7 @@ def test_ctf_handle():
16
16
  gauge = '410007'
17
17
  EWR = 'CF1'
18
18
  EWR_table, bad_EWRs = data_inputs.get_EWR_table()
19
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
19
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
20
20
  gauge: [0]*1+[0]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*10+[0]*345+[0]*1+[0]*9 + [0]*5+[0]*351+[0]*10}
21
21
  df_F = pd.DataFrame(data = data_for_df_F)
22
22
  df_F = df_F.set_index('Date')
@@ -55,7 +55,7 @@ def test_lowflow_handle():
55
55
  gauge = '410007'
56
56
  EWR = 'BF1_a'
57
57
  EWR_table, bad_EWRs = data_inputs.get_EWR_table()
58
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
58
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
59
59
  gauge: [0]*1+[249]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[249]*345+[0]*1+[249]*17 + [0]*5+[249]*351+[249]*10}
60
60
  df_F = pd.DataFrame(data = data_for_df_F)
61
61
  df_F = df_F.set_index('Date')
@@ -95,7 +95,7 @@ def test_flow_handle():
95
95
  gauge = '410007'
96
96
  EWR = 'SF1_S'
97
97
  EWR_table, bad_EWRs = data_inputs.get_EWR_table()
98
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
98
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
99
99
  gauge: [0]*1+[250]*350+[450]*10+[0]*4 +
100
100
  [0]*360+[450]*5 +
101
101
  [450]*5+[250]*345+[0]*1+[450]*14 +
@@ -212,7 +212,7 @@ def test_cumulative_handle_qld(qld_parameter_sheet,expected_events, expected_PU_
212
212
 
213
213
  EWR_table = qld_parameter_sheet
214
214
 
215
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
215
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
216
216
  '422016': ( [2500]*10+[0]*355 +
217
217
  [0]*365 +
218
218
  [0]*365 +
@@ -245,7 +245,7 @@ def test_level_handle():
245
245
  gauge = '425022'
246
246
  EWR = 'LLLF'
247
247
  EWR_table, bad_EWRs = data_inputs.get_EWR_table()
248
- data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
248
+ data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
249
249
  gauge: [0]*1+[0]*260+[56]*90+[0]*1+[0]*4+[0]*9 +
250
250
  [56]*45+[55.9]*1+[56]*45+[0]*269+[0]*3+[19000]*1+[1000]*1 +
251
251
  [0]*5+[0]*345+[0]*1+[0]*13+[56]*1 +
@@ -309,7 +309,7 @@ def test_nest_handle():
309
309
  threshold_flows = threshold_flows + [5300]*50
310
310
  # input data for df_F:
311
311
 
312
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
312
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
313
313
  gauge: ([0]*76+acceptable_flows+[0]*229 +
314
314
  [0]*76+unnacceptable_flows+[0]*229 +
315
315
  [0]*76+threshold_flows+[0]*229 +
@@ -373,7 +373,7 @@ def test_flow_handle_multi():
373
373
  gauge1_flows = ([0]*76+[1250]*5+[0]*229+[0]*55 + [0]*76+[0]*55+[0]*231+[1250]*3 + [1250]*3+[0]*76+[0]*50+[1250]*5+[0]*231 + [0]*77+[1250]*5+[0]*229+[0]*55)
374
374
  gauge2_flows = ([0]*76+[1250]*5+[0]*229+[0]*55 + [0]*76+[0]*55+[0]*231+[1250]*3 + [1250]*3+[0]*76+[0]*50+[1250]*5+[0]*231 + [0]*76+[1250]*5+[0]*230+[0]*55)
375
375
  EWR_table, bad_EWRs = data_inputs.get_EWR_table()
376
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
376
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
377
377
  gauge1: gauge1_flows,
378
378
  gauge2: gauge2_flows
379
379
  }
@@ -418,7 +418,7 @@ def test_lowflow_handle_multi():
418
418
  gauge2 = '421088'
419
419
  EWR = 'BF1_a'
420
420
  EWR_table, bad_EWRs = data_inputs.get_EWR_table()
421
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
421
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
422
422
  gauge1: [40]*76+[1250]*5+[40]*229+[40]*15+[0]*40 + [40]*3+[0]*76+[0]*50+[0]*5+[0]*231 + [40]*75+[0]*50+[40]*230+[40]*10 + [0]*77+[40]*5+[0]*229+[40]*55,
423
423
  gauge2: [40]*76+[1250]*5+[40]*229+[0]*40+[40]*15 + [40]*3+[0]*76+[0]*50+[0]*5+[0]*231 + [40]*75+[0]*50+[40]*230+[40]*10 + [0]*76+[40]*5+[0]*230+[40]*55
424
424
  }
@@ -460,7 +460,7 @@ def test_ctf_handle_multi():
460
460
  gauge2 = '421088'
461
461
  EWR = 'CF'
462
462
  EWR_table, bad_EWRs = data_inputs.get_EWR_table()
463
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
463
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
464
464
  gauge1: [0]*1+[2]*350+[0]*9+[0]*5 + [2]*360+[0]*5 + [0]*10+[2]*345+[0]*1+[2]*9 + [0]*5+[0]*351+[0]*10,
465
465
  gauge2: [0]*1+[2]*350+[0]*9+[0]*5 + [2]*360+[0]*5 + [0]*10+[2]*345+[0]*1+[2]*9 + [0]*5+[0]*351+[0]*10
466
466
  }
@@ -836,7 +836,7 @@ def test_merge_weirpool_with_freshes(PU_df_wp, wp_freshes, freshes_eventYears, w
836
836
 
837
837
 
838
838
  @pytest.mark.parametrize("data_for_df_F,EWR,main_gauge,expected_events,pu_df_data", [
839
- ({'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
839
+ ({'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
840
840
  'A4261002': (
841
841
  [5000]*62 + [16500]*122 + [5000]*181 +
842
842
  [5000]*62 + [16500]*122 + [5000]*181 +
@@ -858,7 +858,7 @@ def test_merge_weirpool_with_freshes(PU_df_wp, wp_freshes, freshes_eventYears, w
858
858
  'CLLMM1a_P_maxRollingEvents': [1, 1, 1, 1], 'CLLMM1a_P_maxRollingAchievement': [1, 1, 1, 1],
859
859
  'CLLMM1a_P_missingDays': [0,0,0,0], 'CLLMM1a_P_totalPossibleDays': [365,365,365,366]}
860
860
  ),
861
- ({'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
861
+ ({'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
862
862
  'A4261002': (
863
863
  [5000]*62 + [16500]*122 + [5000]*181 +
864
864
  [5000]*62 + [16500]*122 + [5000]*181 +
@@ -948,7 +948,7 @@ def test_barrage_level_handle(sa_parameter_sheet, expected_events, expected_PU_d
948
948
  gauge_levels_data = { gauge:gauge_levels for gauge in barrage_gauges }
949
949
 
950
950
  EWR_table = sa_parameter_sheet
951
- DATE = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period()}
951
+ DATE = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d'))}#.to_period()}
952
952
 
953
953
  data_for_df_L = {**DATE, **gauge_levels_data}
954
954
  df_L = pd.DataFrame(data = data_for_df_L)
@@ -1001,7 +1001,7 @@ def test_flow_handle_sa(sa_parameter_sheet, expected_events, expected_PU_df_data
1001
1001
 
1002
1002
  EWR_table = sa_parameter_sheet
1003
1003
 
1004
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1004
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1005
1005
  gauge: (
1006
1006
  [0]*31+ [400 + i*400 for i in range(30)] + [12001]*61 + [10000] +
1007
1007
  [9900 - i*200 for i in range(30)] + [0]*212 +
@@ -1098,7 +1098,7 @@ def test_flow_handle_check_ctf(qld_parameter_sheet, expected_events, expected_PU
1098
1098
 
1099
1099
  EWR_table = qld_parameter_sheet
1100
1100
 
1101
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1101
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1102
1102
  gauge: ( [0]*365 + # first dry spell
1103
1103
  [19]*10 + # in between
1104
1104
  [0]*365 + # second dry spell
@@ -1157,7 +1157,7 @@ def test_cumulative_handle_bbr(qld_parameter_sheet, expected_events, expected_PU
1157
1157
 
1158
1158
  EWR_table = qld_parameter_sheet
1159
1159
 
1160
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1160
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1161
1161
  gauge: (
1162
1162
  [15400]*20+[0]*345 +
1163
1163
  [0]*365 +
@@ -1168,7 +1168,7 @@ def test_cumulative_handle_bbr(qld_parameter_sheet, expected_events, expected_PU
1168
1168
 
1169
1169
  df_F = df_F.set_index('Date')
1170
1170
 
1171
- data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1171
+ data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1172
1172
  "422034": (
1173
1173
  [1.]*10 +[1.3]*3+[1.]*5+[0]*347 +
1174
1174
  [0]*365 +
@@ -1401,7 +1401,7 @@ def test_water_stability_handle(qld_parameter_sheet, expected_events, expected_P
1401
1401
 
1402
1402
  EWR_table = qld_parameter_sheet
1403
1403
 
1404
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1404
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1405
1405
  gauge: ( [0]*31 + [71]*10 + [0]*324 +
1406
1406
  [0]*365 +
1407
1407
  [0]*365 +
@@ -1410,7 +1410,7 @@ def test_water_stability_handle(qld_parameter_sheet, expected_events, expected_P
1410
1410
 
1411
1411
  df_F = df_F.set_index('Date')
1412
1412
 
1413
- data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1413
+ data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1414
1414
  "416011": ( [1]*365 +
1415
1415
  [0]*365 +
1416
1416
  [0]*365 +
@@ -1468,7 +1468,7 @@ def test_water_stability_level_handle(qld_parameter_sheet, expected_events, expe
1468
1468
 
1469
1469
  EWR_table = qld_parameter_sheet
1470
1470
 
1471
- data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1471
+ data_for_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1472
1472
  "422015": ( [2]*31 + [1]*10 + [2]*324 +
1473
1473
  [2]*365 +
1474
1474
  [2]*365 +
@@ -1525,7 +1525,7 @@ def test_flow_handle_anytime(qld_parameter_sheet, expected_events, expected_PU_d
1525
1525
 
1526
1526
  EWR_table = qld_parameter_sheet
1527
1527
 
1528
- data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1528
+ data_for_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1529
1529
  '416011': ( [0]*350+[5600]*15 +
1530
1530
  [5600]*11+ [0]*354 +
1531
1531
  [0]*365 +
@@ -1675,7 +1675,7 @@ def test_flow_handle_anytime(qld_parameter_sheet, expected_events, expected_PU_d
1675
1675
  def test_rise_and_fall_handle(pu, gauge, ewr, gauge_data, expected_events, expected_PU_df_data, vic_parameter_sheet):
1676
1676
  EWR_table = vic_parameter_sheet
1677
1677
 
1678
- data_for_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1678
+ data_for_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1679
1679
  gauge: gauge_data }
1680
1680
 
1681
1681
 
@@ -1737,7 +1737,7 @@ def test_level_change_handle(pu, gauge, ewr, gauge_data, expected_events, expect
1737
1737
 
1738
1738
  EWR_table = vic_parameter_sheet
1739
1739
 
1740
- data_for_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
1740
+ data_for_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
1741
1741
  gauge: gauge_data }
1742
1742
 
1743
1743
 
@@ -24,7 +24,7 @@ def test_observed_cleaner():
24
24
  output_df = 'unit_testing_files/observed_flows_test_output.csv'
25
25
  expected_result = pd.read_csv(output_df, index_col = 'Date')
26
26
  expected_result.index = pd.to_datetime(expected_result.index, format='%Y-%m-%d')
27
- expected_result.index = expected_result.index.to_period()
27
+ expected_result.index = expected_result.index#.to_period()
28
28
  expected_result.columns = ['419039']
29
29
  assert_frame_equal(result, expected_result)
30
30
 
@@ -12,7 +12,9 @@ from py_ewr import scenario_handling, data_inputs
12
12
  def test_match_MDBA_nodes():
13
13
  '''
14
14
  1. Ensure dataframe with flows and levels is split into two dataframes (one flow and one level dataframe)
15
+ 2. Ensure first column is used when duplicate columns are loaded
15
16
  '''
17
+ # TEST 1 # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
16
18
  # Set up input data and pass to test function:
17
19
  model_metadata = data_inputs.get_MDBA_codes()
18
20
  data_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),
@@ -39,6 +41,36 @@ def test_match_MDBA_nodes():
39
41
  assert_frame_equal(df_F, expected_df_F)
40
42
  assert_frame_equal(df_L, expected_df_L)
41
43
 
44
+ # TEST 2 #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
45
+ # Set up input data and pass to test function:
46
+ model_metadata = data_inputs.get_MDBA_codes()
47
+ data_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),
48
+ 'EUSTDS-1-8': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10, # Use
49
+ 'EUSTDS-35-8': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10, # Skip
50
+ 'EUSTUS-35-8': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10, # Skip
51
+ 'EUSTUS-35-8': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10, # Use
52
+ 'EUSTUS-1-8': [1]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10, # Skip
53
+ }
54
+ df = pd.DataFrame(data = data_df)
55
+ df = df.set_index('Date')
56
+
57
+ df_F, df_L = scenario_handling.match_MDBA_nodes(df, model_metadata, 'py_ewr/parameter_metadata/parameter_sheet.csv')
58
+
59
+ # Set up expected outputs and test:
60
+ data_expected_df_L = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),
61
+ '414209': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10
62
+ }
63
+ expected_df_L = pd.DataFrame(data_expected_df_L)
64
+ expected_df_L = expected_df_L.set_index('Date')
65
+ data_expected_df_F = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),
66
+ '414203': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10
67
+ }
68
+ expected_df_F = pd.DataFrame(data_expected_df_F)
69
+ expected_df_F = expected_df_F.set_index('Date')
70
+
71
+ assert_frame_equal(df_F, expected_df_F)
72
+ assert_frame_equal(df_L, expected_df_L)
73
+
42
74
  def test_match_NSW_nodes():
43
75
  '''
44
76
  1. Check NSW model nodes are mapped correctly to their gauges
@@ -188,7 +220,7 @@ def test_cleaner_MDBA():
188
220
 
189
221
  df_clean = scenario_handling.cleaner_MDBA(df)
190
222
  # Set up expected output data and test:
191
- data_expected_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')).to_period(),
223
+ data_expected_df = {'Date': pd.date_range(start= datetime.strptime('2012-07-01', '%Y-%m-%d'), end = datetime.strptime('2016-06-30', '%Y-%m-%d')),#.to_period(),
192
224
  'EUSTDS-1-8': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10,
193
225
  'EUSTUS-35-8': [0]*1+[250]*350+[0]*9+[0]*5 + [0]*360+[0]*5 + [0]*2+[250]*345+[0]*1+[250]*17 + [0]*5+[250]*351+[250]*10
194
226
  }
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes