py-ewr 2.2.3__py3-none-any.whl → 2.2.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
py_ewr/data_inputs.py CHANGED
@@ -416,3 +416,27 @@ def get_scenario_gauges(gauge_results: dict) -> list:
         for gauge in scenario.keys():
             scenario_gauges.append(gauge)
     return list(set(scenario_gauges))
+
+
+def gauge_groups(parameter_sheet: pd.DataFrame) -> dict:
+    '''
+    Returns a dictionary of flow, level, and lake level gauges based on the parameter sheet and some hard coding of other EWRs
+
+    Args:
+        parameter_sheet (pd.DataFrame): input parameter sheet
+
+    Returns:
+        dict: keys as flow, level, and lake level gauges, values as the list of gauges
+    '''
+
+    # Hard coded gauges for the CLLMM EWRs
+    hard_code_levels = ['A4260527', 'A4260524', 'A4260633', 'A4261209', 'A4261165']
+    hard_code_lake_levels = ['A4261133', 'A4260574', 'A4260575']
+
+    flow_gauges = set(parameter_sheet[parameter_sheet['GaugeType'] == 'F']['Gauge']) + set(parameter_sheet['Multigauge'])
+    level_gauges = set(parameter_sheet[parameter_sheet['GaugeType'] == 'L']['Gauge']) + set(parameter_sheet['WeirpoolGauge']) + set(hard_code_levels)
+    lake_level_gauges = set(parameter_sheet[parameter_sheet['GaugeType'] == 'LL']['Gauge']) + set(hard_code_lake_levels)
+
+    return flow_gauges, level_gauges, lake_level_gauges
+
+# def gauges_to_measurand()
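The new `gauge_groups` helper splits the parameter-sheet gauges into flow, level and lake-level groups. A minimal sketch of the same grouping, assuming a parameter sheet with `Gauge`, `GaugeType`, `Multigauge` and `WeirpoolGauge` columns; the helper name `group_gauges_sketch` and the NaN handling are illustrative only, and plain Python sets are combined with the union operator `|`:

```python
import pandas as pd

def group_gauges_sketch(parameter_sheet: pd.DataFrame) -> tuple:
    """Illustrative only: split gauges into flow, level and lake-level sets by GaugeType."""
    def by_type(code: str) -> set:
        # All gauge numbers whose GaugeType matches the given code ('F', 'L' or 'LL')
        return set(parameter_sheet.loc[parameter_sheet['GaugeType'] == code, 'Gauge'].dropna())

    flow_gauges = by_type('F') | set(parameter_sheet['Multigauge'].dropna())
    level_gauges = by_type('L') | set(parameter_sheet['WeirpoolGauge'].dropna())
    lake_level_gauges = by_type('LL')
    return flow_gauges, level_gauges, lake_level_gauges
```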
py_ewr/evaluate_EWRs.py CHANGED
@@ -445,8 +445,8 @@ def get_index_date(date_index:Any)-> datetime.date:
     """
     if type(date_index) == pd._libs.tslibs.timestamps.Timestamp:
         return date_index.date()
-    if type(date_index) == pd._libs.tslibs.period.Period:
-        return date_index.to_timestamp().date()
+    # if type(date_index) == pd._libs.tslibs.period.Period:
+    #     return date_index.date()#.to_timestamp()
     else:
         return date_index
 
@@ -1958,7 +1958,7 @@ def water_stability_check(EWR_info:Dict, iteration:int, flows:List, all_events:D
     if levels_are_stable:
         # record event opportunity for the next n days for the total period of (EggDaysSpell)+ larvae (LarvaeDaysSpell)
         # if the last day of the event is not over the last day of the event window
-        iteration_date = flow_date.to_timestamp().date()
+        iteration_date = flow_date.date()#flow_date.to_timestamp().date()
         last_day_window = get_last_day_of_window(iteration_date, EWR_info['end_month'])
         event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
         if is_date_in_window(iteration_date, last_day_window, event_size):
@@ -1995,7 +1995,7 @@ def water_stability_level_check(EWR_info:Dict, iteration:int, all_events:Dict, w
     if levels_are_stable:
         # record event opportunity for the next n days for the total period of (EggDaysSpell)+ larvae (LarvaeDaysSpell)
         # if the last day of the event is not over the last day of the event window
-        iteration_date = flow_date.to_timestamp().date()
+        iteration_date = flow_date.date()#flow_date.to_timestamp().date()
        last_day_window = get_last_day_of_window(iteration_date, EWR_info['end_month'])
         event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
         if is_date_in_window(iteration_date, last_day_window, event_size):
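The recurring change in evaluate_EWRs.py swaps `flow_date.to_timestamp().date()` for `flow_date.date()`: with this release the date index carries `pd.Timestamp` entries (a `DatetimeIndex`) rather than `pd.Period` entries, so `.date()` is available directly. A minimal illustration of the two index types (not py_ewr API):

```python
import pandas as pd

ts_index = pd.date_range("2024-07-01", periods=3, freq="D")   # DatetimeIndex of Timestamps
pd_index = ts_index.to_period("D")                            # PeriodIndex of Periods

print(ts_index[0].date())                   # Timestamp exposes .date() directly
print(pd_index[0].to_timestamp().date())    # Period needs .to_timestamp() first
```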
@@ -2604,7 +2604,7 @@ def lower_lakes_level_check(EWR_info: dict, levels: pd.Series, event: list, all_
 #------------------------------------ Calculation functions --------------------------------------#
 
 
-def create_water_stability_event(flow_date: pd.Period, flows:List, iteration: int, EWR_info:dict)->List:
+def create_water_stability_event(flow_date: pd.Timestamp, flows:List, iteration: int, EWR_info:dict)->List:#pd.Period
     """create overlapping event that meets an achievement for fish recruitment water stability
 
     Args:
@@ -2617,7 +2617,7 @@ def create_water_stability_event(flow_date: pd.Period, flows:List, iteration: in
     """
     event_size = EWR_info['eggs_days_spell'] + EWR_info['larvae_days_spell']
     event_flows = flows[iteration: iteration + event_size]
-    start_event_date = flow_date.to_timestamp().date()
+    start_event_date = flow_date.date()#flow_date.to_timestamp().date()
     event_dates = [ start_event_date + timedelta(i) for i in range(event_size)]
 
     return [(d, flow) for d, flow in zip(event_dates, event_flows)]
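`create_water_stability_event` pairs each day of the egg-plus-larvae spell with the corresponding flow, starting from the current iteration. A self-contained sketch of that pairing with made-up EWR parameters and flows:

```python
from datetime import date, timedelta

ewr_info = {'eggs_days_spell': 3, 'larvae_days_spell': 2}   # hypothetical values
flows = [110, 120, 130, 125, 118, 140, 150]
iteration, start_event_date = 1, date(2024, 10, 5)

event_size = ewr_info['eggs_days_spell'] + ewr_info['larvae_days_spell']
event_flows = flows[iteration: iteration + event_size]
event_dates = [start_event_date + timedelta(i) for i in range(event_size)]
event = list(zip(event_dates, event_flows))   # five (date, flow) pairs starting 2024-10-05
```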
@@ -3810,16 +3810,16 @@ def nest_calc_percent_trigger(EWR_info:Dict, flows:List, water_years:List, dates
         flow_percent_change = calc_flow_percent_change(i, flows)
         trigger_day = date(dates[i].year,EWR_info["trigger_month"], EWR_info["trigger_day"])
         cut_date = calc_nest_cut_date(EWR_info, i, dates)
-        is_in_trigger_window = dates[i].to_timestamp().date() >= trigger_day \
-            and dates[i].to_timestamp().date() <= trigger_day + timedelta(days=14)
+        is_in_trigger_window = dates[i].date() >= trigger_day \
+            and dates[i].date() <= trigger_day + timedelta(days=14) #.to_timestamp() .to_timestamp()
         iteration_no_event = 0
 
         ## if there IS an ongoing event check if we are on the trigger season window
         # if yes then check the current flow
         if total_event > 0:
-            if (dates[i].to_timestamp().date() >= trigger_day) and (dates[i].to_timestamp().date() <= cut_date):
+            if (dates[i].date() >= trigger_day) and (dates[i].date() <= cut_date):
                 event, all_events, gap_track, total_event, iteration_no_event = nest_flow_check(EWR_info, i, flow, event, all_events,
-                                                gap_track, water_years, total_event, flow_date, flow_percent_change, iteration_no_event)
+                                                gap_track, water_years, total_event, flow_date, flow_percent_change, iteration_no_event) #.to_timestamp() .to_timestamp()
 
             # this path will only be executed if an event extends beyond the cut date
             else:
@@ -3840,12 +3840,12 @@ def nest_calc_percent_trigger(EWR_info:Dict, flows:List, water_years:List, dates
 
     # Check final iteration in the flow timeseries, saving any ongoing events/event gaps to their spots in the dictionaries:
     # reset all variable to last flow
-    flow_date = dates[-1].to_timestamp().date()
+    flow_date = dates[-1].date()#.to_timestamp()
     flow_percent_change = calc_flow_percent_change(-1, flows)
     trigger_day = date(dates[-1].year,EWR_info["trigger_month"], EWR_info["trigger_day"])
     cut_date = calc_nest_cut_date(EWR_info, -1, dates)
-    is_in_trigger_window = dates[-1].to_timestamp().date() >= trigger_day - timedelta(days=7) \
-        and dates[-1].to_timestamp().date() <= trigger_day + timedelta(days=7)
+    is_in_trigger_window = dates[-1].date() >= trigger_day - timedelta(days=7) \
+        and dates[-1].date() <= trigger_day + timedelta(days=7) #.to_timestamp() .to_timestamp()
     iteration_no_event = 0
 
     if total_event > 0:
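The nest-trigger hunks apply the same `.date()` change to a window test: an event may only start within a fixed number of days of the trigger day. A small sketch of that membership test with made-up trigger settings:

```python
from datetime import date, timedelta
import pandas as pd

ewr_info = {'trigger_month': 10, 'trigger_day': 15}          # hypothetical values
dates = pd.date_range('2024-10-10', periods=30, freq='D')    # DatetimeIndex of Timestamps

i = 8
trigger_day = date(dates[i].year, ewr_info['trigger_month'], ewr_info['trigger_day'])
is_in_trigger_window = (dates[i].date() >= trigger_day
                        and dates[i].date() <= trigger_day + timedelta(days=14))
print(dates[i].date(), is_in_trigger_window)   # 2024-10-18 True
```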
@@ -130,7 +130,7 @@ def observed_cleaner(input_df: pd.DataFrame, dates: dict) -> pd.DataFrame:
     start_date = datetime(dates['start_date'].year, dates['start_date'].month, dates['start_date'].day)
     end_date = datetime(dates['end_date'].year, dates['end_date'].month, dates['end_date'].day)
 
-    df_index = pd.date_range(start=start_date,end=end_date - timedelta(days=1)).to_period()
+    df_index = pd.date_range(start=start_date,end=end_date - timedelta(days=1))#.to_period()
     gauge_data_df = pd.DataFrame()
     gauge_data_df['Date'] = df_index
     gauge_data_df = gauge_data_df.set_index('Date')
@@ -139,7 +139,7 @@ def observed_cleaner(input_df: pd.DataFrame, dates: dict) -> pd.DataFrame:
 
 
     input_df['Date'] = pd.to_datetime(input_df['DATETIME'], format = '%Y-%m-%d')
-    input_df['Date'] = input_df['Date'].apply(lambda x: x.to_period(freq='D'))
+    # input_df['Date'] = input_df['Date'].apply(lambda x: x.to_period(freq='D'))
 
     # Check with states for more codes:
     bad_data_codes = data_inputs.get_bad_QA_codes()
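The observed-data cleaning changes follow the same pattern: the daily index is now left as a `DatetimeIndex` instead of being converted to a `PeriodIndex`. A short, illustrative sketch of building that index and aligning raw gauge readings to it (the sample data and date range are made up):

```python
import pandas as pd
from datetime import datetime, timedelta

# Hypothetical observed gauge data with a missing day on 2024-07-03.
raw = pd.DataFrame({'DATETIME': ['2024-07-01', '2024-07-02', '2024-07-04'],
                    'VALUE': [105.0, 98.0, 110.0]})
raw['Date'] = pd.to_datetime(raw['DATETIME'], format='%Y-%m-%d')

start_date, end_date = datetime(2024, 7, 1), datetime(2024, 7, 5)
df_index = pd.date_range(start=start_date, end=end_date - timedelta(days=1))  # DatetimeIndex, no .to_period()

gauge_data_df = raw.set_index('Date').reindex(df_index)  # missing days become NaN
print(gauge_data_df)
```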
@@ -258,7 +258,7 @@ def cleaner_MDBA(input_df: pd.DataFrame) -> pd.DataFrame:
 
     cleaned_df = input_df.rename(columns={'Mn': 'Month', 'Dy': 'Day'})
     cleaned_df['Date'] = pd.to_datetime(cleaned_df[['Year', 'Month', 'Day']], format = '%Y-%m-%d')
-    cleaned_df['Date'] = cleaned_df['Date'].apply(lambda x: x.to_period(freq='D'))
+    # cleaned_df['Date'] = cleaned_df['Date'].apply(lambda x: x.to_period(freq='D'))
     cleaned_df = cleaned_df.drop(['Day', 'Month', 'Year'], axis = 1)
     cleaned_df = cleaned_df.set_index('Date')
 
@@ -436,6 +436,40 @@ def extract_gauge_from_string(input_string: str) -> str:
     gauge = input_string.split('_')[0]
     return gauge
 
+# def match_MDBA_nodes_dev(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_table_path: str) -> tuple:
+#     '''
+#     Iterate over the gauges in the parameter sheet,
+#     find all the occurences of that gauge in the ARWC column in the model metadata file,
+#     for each match, search for the matching siteID in the model file,
+#     append the column to the flow dataframe.
+
+#     Args:
+#         input_df (pd.DataFrame): flow/water level dataframe
+#         model_metadata (pd.DataFrame): dataframe linking model nodes to gauges
+
+#     Returns:
+#         tuple[pd.DataFrame, pd.DataFrame]: flow dataframe, water level dataframe
+
+#     '''
+#     df_flow = pd.DataFrame(index = input_df.index)
+#     df_level = pd.DataFrame(index = input_df.index)
+
+#     unique_gauges = #Get unique gauges from the parameter sheet
+#     #TODO: include logic to have the measurand included
+#     for i in unique_gauges:
+#         # Subset of the SiteID file with the gauges
+#         subset_df = model_metadata[model_metadata['AWRC'] == i]
+#         # Iterate over the occurences of the gauge and check if the matching SiteID file is in the model file
+#         for j in subset_df.iterrows:
+#             site_mm = j['SITEID']
+#             if site_mm in input_df.columns:
+#                 df_flow[i] = input_df[site_mm+INPUT_MEASURAND+ANY_QUALITY_CODE]
+#                 or
+#                 df_level[i] = input_df[site_mm+INPUT_MEASURAND+ANY_QUALITY_CODE]
+
+#     if df_flow.empty and df_level.empty:
+#         raise ValueError('No relevant gauges and or measurands found in dataset, the EWR tool cannot evaluate this model output file')
+#     return df_flow, df_level
 
 
 def match_MDBA_nodes(input_df: pd.DataFrame, model_metadata: pd.DataFrame, ewr_table_path: str) -> tuple:
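The commented-out `match_MDBA_nodes_dev` scaffold sketches the intended logic: for each parameter-sheet gauge, look up matching SiteIDs via the metadata `AWRC` column and route the matching model columns into flow or level dataframes. Below is a hypothetical, runnable rendering of that loop; the `flow_measurands` set and the `"<SiteID>-<measurand>-<quality>"` column convention are invented for illustration (the scaffold's `INPUT_MEASURAND` and `ANY_QUALITY_CODE` placeholders are not defined in the diff), so this is not the tool's actual matching code:

```python
import pandas as pd

def match_nodes_sketch(input_df: pd.DataFrame, model_metadata: pd.DataFrame,
                       gauges: list, flow_measurands: set = frozenset({'1'})) -> tuple:
    """Hypothetical sketch: map model SiteID columns back to gauge numbers."""
    df_flow = pd.DataFrame(index=input_df.index)
    df_level = pd.DataFrame(index=input_df.index)

    for gauge in gauges:
        # All metadata rows whose AWRC gauge number matches this gauge
        subset_df = model_metadata[model_metadata['AWRC'] == gauge]
        for _, row in subset_df.iterrows():          # note: iterrows() is called, not referenced
            site_mm = row['SITEID']
            # Assumed column naming: "<SiteID>-<measurand>-<quality>"
            for col in input_df.columns:
                site, measurand, _quality = col.split('-')
                if site != site_mm:
                    continue
                target = df_flow if measurand in flow_measurands else df_level
                target[gauge] = input_df[col]

    if df_flow.empty and df_level.empty:
        raise ValueError('No relevant gauges and or measurands found in dataset')
    return df_flow, df_level
```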
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: py_ewr
-Version: 2.2.3
+Version: 2.2.4
 Summary: Environmental Water Requirement calculator
 Home-page: https://github.com/MDBAuth/EWR_tool
 Author: Martin Job
@@ -34,7 +34,7 @@ Requires-Dist: numpy <2
 [![PyPI](https://img.shields.io/pypi/v/py-ewr)](https://pypi.org/project/py-ewr/)
 [![DOI](https://zenodo.org/badge/342122359.svg)](https://zenodo.org/badge/latestdoi/342122359)
 
-### **EWR tool version 2.2.3 README**
+### **EWR tool version 2.2.4 README**
 
 ### **Notes on recent version update**
 - Remove TQDM loading bars
@@ -1,17 +1,17 @@
 py_ewr/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-py_ewr/data_inputs.py,sha256=SmY92Xq7oAzkHsJgxncVzC1C9YqnLpDAbVadBF0nS7k,18440
-py_ewr/evaluate_EWRs.py,sha256=Ss4sXRIiPpj9txrJ9IdJb6OpzzTJzKzXB1qcxzeR1UE,229059
+py_ewr/data_inputs.py,sha256=PtpFyikT_IoMj_Ont6UykSdyiZXapny_Yk1J9VxParQ,19494
+py_ewr/evaluate_EWRs.py,sha256=09sDcSXW-mH3E8U4fxLp5pR55ISFOs2dZhc40eqoL4k,229138
 py_ewr/io.py,sha256=Is0xPAzLx6-ylpTFyYJxMimkNVxxoTxUcknTk6bQbgs,840
-py_ewr/observed_handling.py,sha256=j4M3YUcncS1hhVR7OIB0C_XuRW09koKHrAL4n0l_yAY,17871
-py_ewr/scenario_handling.py,sha256=1H9EY94PJGitOyhiaIHzgGt5FN1ztJxUV2-LYBGiSic,32636
+py_ewr/observed_handling.py,sha256=mzdJoAUqhFsu3CIkJup_cXiAGLkxEDICnAzemBgvMG4,17874
+py_ewr/scenario_handling.py,sha256=s-Es9n4YXdLhFFsT8ypYKfZL7_puRxLJRuFip1EC95g,34310
 py_ewr/summarise_results.py,sha256=7w2Tbriwob21UXG0N2rSKffneg3M49hWouJPTHVjDAU,29747
 py_ewr/model_metadata/SiteID_MDBA.csv,sha256=DcwFmBBoLmv1lGik40IwTMSjSBPaDsTt8Nluh2s7wjM,183665
 py_ewr/model_metadata/SiteID_NSW.csv,sha256=UVBxN43Z5KWCvWhQ5Rh6TNEn35q4_sjPxKyHg8wPFws,6805
 py_ewr/model_metadata/iqqm_stations.csv,sha256=vl4CPtPslG5VplSzf_yLZulTrmab-mEBHOfzFtS1kf4,110
 py_ewr/parameter_metadata/ewr_calc_config.json,sha256=l1AgIRlf7UUmk3BNQ4r3kutU48pYHHVKmLELjoB-8rQ,17664
 py_ewr/parameter_metadata/parameter_sheet.csv,sha256=IiYAvf0hG9fchuwqtfDZhI8WSPB5jgaaaJ0MLwfPYAw,899556
-py_ewr-2.2.3.dist-info/LICENSE,sha256=ogEPNDSH0_dhiv_lT3ifVIdgIzHAqNA_SemnxUfPBJk,7048
-py_ewr-2.2.3.dist-info/METADATA,sha256=aErpD5tvpcNUARzcX6b0HvNczeN3dOeTDnphfmS12nA,10174
-py_ewr-2.2.3.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
-py_ewr-2.2.3.dist-info/top_level.txt,sha256=n3725d-64Cjyb-YMUMV64UAuIflzUh2_UZSxiIbrur4,7
-py_ewr-2.2.3.dist-info/RECORD,,
+py_ewr-2.2.4.dist-info/LICENSE,sha256=ogEPNDSH0_dhiv_lT3ifVIdgIzHAqNA_SemnxUfPBJk,7048
+py_ewr-2.2.4.dist-info/METADATA,sha256=npsbWV01a3qKUUM1eJLRpy8YcsEPpchqI5_hVPTDMa8,10174
+py_ewr-2.2.4.dist-info/WHEEL,sha256=R0nc6qTxuoLk7ShA2_Y-UWkN8ZdfDBG2B6Eqpz2WXbs,91
+py_ewr-2.2.4.dist-info/top_level.txt,sha256=n3725d-64Cjyb-YMUMV64UAuIflzUh2_UZSxiIbrur4,7
+py_ewr-2.2.4.dist-info/RECORD,,